From 5824e2d0c71f3d023d2cfa793fa4cced187cfafb Mon Sep 17 00:00:00 2001 From: tjtanaa Date: Wed, 18 Nov 2020 00:55:21 +0800 Subject: [PATCH 1/4] Fixed wrong inputs: rotation, transformation matrix, inference utils, point_pillar_v2.cpp --- .gitignore | 6 + Analyse_Input_Pipeline.ipynb | 356 ++++++++++++ CMakeLists.txt | 3 +- Visualizing_Point_Pillar_Image.ipynb | 365 +++++++++++++ compute_intersection_single_element.ipynb | 389 +++++++++++++ config.py | 58 +- config_v2.py | 118 ++++ inference_utils.py | 71 +++ inference_utils_v2.py | 209 +++++++ loss.py | 11 + network.py | 3 +- point_pillars_custom_prediction.py | 157 ++++++ point_pillars_custom_processors_v2.py | 372 +++++++++++++ point_pillars_prediction.py | 104 ++-- point_pillars_training_custom_run_v2.py | 87 +++ point_pillars_training_run.py | 21 +- point_pillars_visualize_input.py | 145 +++++ processors.py | 145 ++++- readers.py | 2 +- src/point_pillars.cpp | 9 +- src/point_pillars_v2.cpp | 634 ++++++++++++++++++++++ viz.py | 0 22 files changed, 3201 insertions(+), 64 deletions(-) create mode 100644 Analyse_Input_Pipeline.ipynb create mode 100644 Visualizing_Point_Pillar_Image.ipynb create mode 100644 compute_intersection_single_element.ipynb create mode 100644 config_v2.py create mode 100644 inference_utils_v2.py create mode 100644 point_pillars_custom_prediction.py create mode 100644 point_pillars_custom_processors_v2.py create mode 100644 point_pillars_training_custom_run_v2.py create mode 100644 point_pillars_visualize_input.py create mode 100644 src/point_pillars_v2.cpp create mode 100644 viz.py diff --git a/.gitignore b/.gitignore index 0dca5a1..0107fe4 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,9 @@ cmake_example.egg-info/ dist/ logs/ point_pillars.egg-info/ +logs_*/ +.vscode/ +.ipynb_checkpoints/ +archive/ +visualization/ +logs.zip \ No newline at end of file diff --git a/Analyse_Input_Pipeline.ipynb b/Analyse_Input_Pipeline.ipynb new file mode 100644 index 0000000..0d4039c --- /dev/null +++ b/Analyse_Input_Pipeline.ipynb @@ -0,0 +1,356 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import numpy as np\n", + "# import tensorflow as tf\n", + "from glob import glob\n", + "import cv2\n", + "\n", + "from config import Parameters\n", + "# from processors import SimpleDataGenerator\n", + "# from custom_processors import AnalyseCustomDataGenerator\n", + "from point_pillars_custom_processors_v2 import AnalyseCustomDataGenerator\n", + "from det3d.pc_kitti_dataset import PCKittiAugmentedDataset\n", + "from tqdm.notebook import trange\n", + "# print(dir(tqdm))\n", + "\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# DATA_ROOT = \"/media/data3/tjtanaa/kitti_dataset/KITTI/object/training\" # TODO make main arg\n", + "DATA_ROOT = \"/media/data3/tjtanaa/kitti_dataset/\" # TODO make main arg\n", + "MODEL_ROOT = \"./logs_Car_Pedestrian_Custom_Dataset_single_process\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "params = Parameters()\n", + "\n", + "gt_database_dir = os.path.join(DATA_ROOT, \"gt_database\")\n", + "\n", + "# training_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT,\n", + "# npoints=20000, split='train', classes=list(params.classes_map.keys()), \n", + "# random_select=False, gt_database_dir=None, 
aug_hard_ratio=0.7)\n", + "\n", + "\n", + "# validation_gen = PCKittiAugmentedDataset(root_dir=DATA_ROOT, \n", + "# npoints=20000, split='val', random_select=False, classes=list(params.classes_map.keys()))" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# # get the min and max (range) of every axis\n", + "# x_max = np.array(-9999.0)\n", + "# x_min = np.array(9999.0)\n", + "# y_max = np.array(-9999.0)\n", + "# y_min = np.array(9999.0)\n", + "# z_max = np.array(-9999.0)\n", + "# z_min = np.array(9999.0)\n", + "# for i in trange(len(validation_gen)):\n", + "# sample = validation_gen[i]\n", + "# x_max = np.max([x_max, np.max(sample['pts_rect'][:,0])])\n", + "# x_min = np.min([x_min, np.min(sample['pts_rect'][:,0])])\n", + "# y_max = np.max([y_max, np.max(sample['pts_rect'][:,1])])\n", + "# y_min = np.min([y_min, np.min(sample['pts_rect'][:,1])])\n", + "# z_max = np.max([z_max, np.max(sample['pts_rect'][:,2])])\n", + "# z_min = np.min([z_min, np.min(sample['pts_rect'][:,2])])\n", + "# print(x_min, x_max)\n", + "# print(y_min, y_max)\n", + "# print(z_min, z_max)\n", + "# # 1.349664568901062 78.88325500488281\n", + "# # -52.52265167236328 50.9976806640625\n", + "# # -1.0496952533721924 3.1484153270721436\n", + " \n", + " \n", + "# # 1.349664568901062 78.88325500488281\n", + "# # -52.52265167236328 50.9976806640625\n", + "# # -1.0496952533721924 3.1484153270721436" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "bc52e70dae13465eaddc01ca8fd85669", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=942.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(1, 12000, 100, 9) (1, 12000, 3)\n", + "[ 6.1949997e+00 -1.4270003e+00 -1.6610003e+00 -2.9000002e-01\n", + " -1.0499954e-02 -6.4125061e-02 -1.3750792e-03 1.1499977e-01\n", + " 1.3002157e-02]\n", + "[ 6.1959996e+00 -1.4070001e+00 -1.6590002e+00 -2.5000000e-01\n", + " -9.5000267e-03 -4.4124842e-02 6.2501431e-04 1.1599970e-01\n", + " 3.3002377e-02]\n", + "[ 6.1980000e+00 -1.3870002e+00 -1.6590002e+00 -2.6999998e-01\n", + " -7.4996948e-03 -2.4124980e-02 6.2501431e-04 1.1800003e-01\n", + " 5.3002238e-02]\n", + "[ 6.2029991e+00 -1.3670001e+00 -1.6590003e+00 -2.2000000e-01\n", + " -2.5005341e-03 -4.1248798e-03 6.2489510e-04 1.2299919e-01\n", + " 7.3002338e-02]\n", + "[ 6.2049999e+00 -1.3580000e+00 -1.6590003e+00 -3.4000000e-01\n", + " -4.9972534e-04 4.8751831e-03 6.2489510e-04 1.2500000e-01\n", + " 8.2002401e-02]\n", + "[ 6.2149997e+00 -1.3390002e+00 -1.6610001e+00 -2.2999999e-01\n", + " 9.5000267e-03 2.3874998e-02 -1.3749599e-03 1.3499975e-01\n", + " 1.0100222e-01]\n", + "[ 6.2089992e+00 -1.3180001e+00 -1.6580001e+00 -2.8000000e-01\n", + " 3.4995079e-03 4.4875145e-02 1.6250610e-03 1.2899923e-01\n", + " 1.2200236e-01]\n", + "[ 6.2230000e+00 -1.3000002e+00 -1.6610000e+00 -2.9000002e-01\n", + " 1.7500401e-02 6.2875032e-02 -1.3748407e-03 1.4300013e-01\n", + " 1.4000225e-01]\n", + "[0. 0. 0. 0. 0. 0. 0. 0. 0.]\n", + "[0. 0. 0. 0. 0. 0. 0. 0. 
0.]\n", + "\n" + ] + }, + { + "ename": "NameError", + "evalue": "name 'exit' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0;31m# sample = validation_gen[i]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mpillars\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvoxels\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0moccupancy\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mposition\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msize\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mangle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheading\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclassification\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0;34m[\u001b[0m\u001b[0mpts_input\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgt_boxes3d\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msample\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalidation_gen\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpts_input\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0mx_max\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mx_max\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpts_input\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/tjtanaa/PointPillars/point_pillars_custom_processors_v2.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, batch_id)\u001b[0m\n\u001b[1;32m 379\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpillars_\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 380\u001b[0m \u001b[0;31m# print(np.sum(pillars_ > 0))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 381\u001b[0;31m \u001b[0mexit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 382\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 383\u001b[0m 
\u001b[0mpillars\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpillars_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNameError\u001b[0m: name 'exit' is not defined" + ] + } + ], + "source": [ + "\n", + "validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT,\n", + " npoints=20000, split='val', classes=list(params.classes_map.keys()), \n", + " random_select=False, gt_database_dir=None, aug_hard_ratio=0.7)\n", + "# get the min and max (range) of every axis\n", + "x_max = np.array(-9999.0)\n", + "x_min = np.array(9999.0)\n", + "y_max = np.array(-9999.0)\n", + "y_min = np.array(9999.0)\n", + "z_max = np.array(-9999.0)\n", + "z_min = np.array(9999.0)\n", + "for i in trange(len(validation_gen)):\n", + "# sample = validation_gen[i]\n", + " [pillars, voxels], [occupancy, position, size, angle, heading, classification], \\\n", + " [pts_input, gt_boxes3d, sample] = validation_gen[i]\n", + " for j in range(len(pts_input)):\n", + " x_max = np.max([x_max, np.max(pts_input[j][:,0])])\n", + " x_min = np.min([x_min, np.min(pts_input[j][:,0])])\n", + " y_max = np.max([y_max, np.max(pts_input[j][:,1])])\n", + " y_min = np.min([y_min, np.min(pts_input[j][:,1])])\n", + " z_max = np.max([z_max, np.max(pts_input[j][:,2])])\n", + " z_min = np.min([z_min, np.min(pts_input[j][:,2])])\n", + "print(x_min, x_max)\n", + "print(y_min, y_max)\n", + "print(z_min, z_max)\n", + "# 1.8940000534057617 70.76299285888672\n", + "# -39.99699783325195 40.195003509521484\n", + "# -3.204000234603882 2.0290002822875977" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# import numba\n", + "\n", + "# [pillars, voxels], \\\n", + "# [occupancy, position, size, angle, heading, classification], \\\n", + "# [pts_input, gt_boxes3d, sample] = validation_gen[8]\n", + "\n", + "\n", + "# @numba.jit(nopython=True)\n", + "# def _points_to_bevmap_reverse_kernel(\n", + "# points,\n", + "# voxel_size,\n", + "# coors_range,\n", + "# coor_to_voxelidx,\n", + "# # coors_2d,\n", + "# bev_map,\n", + "# height_lowers,\n", + "# # density_norm_num=16,\n", + "# with_reflectivity=False,\n", + "# max_voxels=40000):\n", + "# # put all computations to one loop.\n", + "# # we shouldn't create large array in main jit code, otherwise\n", + "# # reduce performance\n", + "# N = points.shape[0]\n", + "# ndim = 3\n", + "# ndim_minus_1 = ndim - 1\n", + "# grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size\n", + "# # np.round(grid_size)\n", + "# # grid_size = np.round(grid_size).astype(np.int64)(np.int32)\n", + "# grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)\n", + "# height_slice_size = voxel_size[-1]\n", + "# coor = np.zeros(shape=(3, ), dtype=np.int32) # DHW\n", + "# voxel_num = 0\n", + "# failed = False\n", + "# for i in range(N):\n", + "# failed = False\n", + "# for j in range(ndim):\n", + "# c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j])\n", + "# if c < 0 or c >= grid_size[j]:\n", + "# failed = True\n", + "# break\n", + "# coor[ndim_minus_1 - j] = c\n", + "# if failed:\n", + "# continue\n", + "# voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]]\n", + "# if voxelidx == -1:\n", + "# voxelidx = voxel_num\n", + "# if voxel_num >= max_voxels:\n", + "# break\n", + "# voxel_num += 1\n", + "# coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx\n", + "# # coors_2d[voxelidx] = coor[1:]\n", + "# bev_map[-1, coor[1], 
coor[2]] += 1\n", + "# height_norm = bev_map[coor[0], coor[1], coor[2]]\n", + "# incomimg_height_norm = (\n", + "# points[i, 2] - height_lowers[coor[0]]) / height_slice_size\n", + "# if incomimg_height_norm > height_norm:\n", + "# bev_map[coor[0], coor[1], coor[2]] = incomimg_height_norm\n", + "# if with_reflectivity:\n", + "# bev_map[-2, coor[1], coor[2]] = points[i, 3]\n", + "# # return voxel_num\n", + "\n", + "# def points_to_bev(points,\n", + "# voxel_size,\n", + "# coors_range,\n", + "# with_reflectivity=False,\n", + "# density_norm_num=16,\n", + "# max_voxels=40000):\n", + "# \"\"\"convert kitti points(N, 4) to a bev map. return [C, H, W] map.\n", + "# this function based on algorithm in points_to_voxel.\n", + "# takes 5ms in a reduced pointcloud with voxel_size=[0.1, 0.1, 0.8]\n", + "\n", + "# Args:\n", + "# points: [N, ndim] float tensor. points[:, :3] contain xyz points and\n", + "# points[:, 3] contain reflectivity.\n", + "# voxel_size: [3] list/tuple or array, float. xyz, indicate voxel size\n", + "# coors_range: [6] list/tuple or array, float. indicate voxel range.\n", + "# format: xyzxyz, minmax\n", + "# with_reflectivity: bool. if True, will add a intensity map to bev map.\n", + "# Returns:\n", + "# bev_map: [num_height_maps + 1(2), H, W] float tensor. \n", + "# `WARNING`: bev_map[-1] is num_points map, NOT density map, \n", + "# because calculate density map need more time in cpu rather than gpu. \n", + "# if with_reflectivity is True, bev_map[-2] is intensity map. \n", + "# \"\"\"\n", + "# if not isinstance(voxel_size, np.ndarray):\n", + "# voxel_size = np.array(voxel_size, dtype=points.dtype)\n", + "# if not isinstance(coors_range, np.ndarray):\n", + "# coors_range = np.array(coors_range, dtype=points.dtype)\n", + "# voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size\n", + "# voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist())\n", + "# voxelmap_shape = voxelmap_shape[::-1] # DHW format\n", + "# coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32)\n", + "# # coors_2d = np.zeros(shape=(max_voxels, 2), dtype=np.int32)\n", + "# bev_map_shape = list(voxelmap_shape)\n", + "# bev_map_shape[0] += 1\n", + "# height_lowers = np.linspace(\n", + "# coors_range[2], coors_range[5], voxelmap_shape[0], endpoint=False)\n", + "# if with_reflectivity:\n", + "# bev_map_shape[0] += 1\n", + "# bev_map = np.zeros(shape=bev_map_shape, dtype=points.dtype)\n", + "# _points_to_bevmap_reverse_kernel(points, voxel_size, coors_range,\n", + "# coor_to_voxelidx, bev_map, height_lowers,\n", + "# with_reflectivity, max_voxels)\n", + "# # print(voxel_num)\n", + "# return bev_map\n", + "\n", + "# def point_to_vis_bev(points,\n", + "# voxel_size=None,\n", + "# coors_range=None,\n", + "# max_voxels=80000):\n", + "# if voxel_size is None:\n", + "# voxel_size = [0.1, 0.1, 0.1]\n", + "# if coors_range is None:\n", + "# coors_range = [-50, -50, -3, 50, 50, 1]\n", + "# voxel_size[2] = coors_range[5] - coors_range[2]\n", + "# bev_map = points_to_bev(\n", + "# points, voxel_size, coors_range, max_voxels=max_voxels)\n", + "# height_map = (bev_map[0] * 255).astype(np.uint8)\n", + "# return cv2.cvtColor(height_map, cv2.COLOR_GRAY2RGB)\n", + "\n", + "# img = point_to_vis_bev(pts_input[0], voxel_size=[params.x_step/2, params.y_step/2, 0.01],\n", + "# coors_range=[params.x_min, params.y_min+20.32, params.z_min, \n", + "# params.x_max-40.32, params.y_max-20.32, params.z_max] )\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + 
"outputs": [], + "source": [ + "# fig1 = plt.figure(figsize=(10,10))\n", + "# ax = fig1.add_subplot(111)\n", + "# plt.imshow(img)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/CMakeLists.txt b/CMakeLists.txt index c21696f..0dea901 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,5 @@ cmake_minimum_required(VERSION 3.5) project(point_pillars) add_subdirectory(pybind11) -pybind11_add_module(point_pillars SHARED src/point_pillars.cpp) \ No newline at end of file +pybind11_add_module(point_pillars SHARED src/point_pillars.cpp) +pybind11_add_module(point_pillars_v2 SHARED src/point_pillars_v2.cpp) \ No newline at end of file diff --git a/Visualizing_Point_Pillar_Image.ipynb b/Visualizing_Point_Pillar_Image.ipynb new file mode 100644 index 0000000..8835ae9 --- /dev/null +++ b/Visualizing_Point_Pillar_Image.ipynb @@ -0,0 +1,365 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import numpy as np\n", + "# import tensorflow as tf\n", + "from glob import glob\n", + "import cv2\n", + "\n", + "from config import Parameters\n", + "# from processors import SimpleDataGenerator\n", + "from custom_processors import AnalyseCustomDataGenerator\n", + "from det3d.pc_kitti_dataset import PCKittiAugmentedDataset\n", + "\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# DATA_ROOT = \"/media/data3/tjtanaa/kitti_dataset/KITTI/object/training\" # TODO make main arg\n", + "DATA_ROOT = \"/media/data3/tjtanaa/kitti_dataset/\" # TODO make main arg\n", + "MODEL_ROOT = \"./logs_Car_Pedestrian_Custom_Dataset_single_process\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "params = Parameters()\n", + "\n", + "gt_database_dir = os.path.join(DATA_ROOT, \"gt_database\")\n", + "\n", + "training_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT,\n", + " npoints=20000, split='train', classes=list(params.classes_map.keys()), \n", + " random_select=False, gt_database_dir=None, aug_hard_ratio=0.7)\n", + "\n", + "validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, \n", + " npoints=20000, split='val', random_select=False, classes=list(params.classes_map.keys()))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# # get the min and max (range) of every axis\n", + "# x_max = np.array(-9999.0)\n", + "# x_min = np.array(9999.0)\n", + "# y_max = np.array(-9999.0)\n", + "# y_min = np.array(9999.0)\n", + "# z_max = np.array(-9999.0)\n", + "# z_min = np.array(9999.0)\n", + "# for i in range(len(training_gen)):\n", + "# [pillars, voxels], \\\n", + "# [occupancy, position, size, angle, heading, classification], \\\n", + "# [pts_input, gt_boxes3d, sample] = training_gen[i]\n", + "# for pts in pts_input:\n", + "# # print(type(pts))\n", + "# # print(pts.shape)\n", + "# x_max = np.max([x_max, np.max(pts[:,0])])\n", + "# x_min = np.min([x_min, np.min(pts[:,0])])\n", + "# 
y_max = np.max([y_max, np.max(pts[:,1])])\n", + "# y_min = np.min([y_min, np.min(pts[:,1])])\n", + "# z_max = np.max([z_max, np.max(pts[:,2])])\n", + "# z_min = np.min([z_min, np.min(pts[:,2])])\n", + "# print(x_min, x_max)\n", + "# print(y_min, y_max)\n", + "# print(z_min, z_max)\n", + "# 1.349664568901062 78.88325500488281\n", + "# -52.52265167236328 50.9976806640625\n", + "# -1.0496952533721924 3.1484153270721436\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING: /home/tan/tjtanaa/PointPillars/visualization/custom_processor already exists, operation skipped.\n", + "WARNING: /home/tan/tjtanaa/PointPillars/visualization/custom_processor/data already exists, operation skipped.\n", + "WARNING: /home/tan/tjtanaa/PointPillars/visualization/custom_processor/html already exists, operation skipped.\n", + "36\n", + "37\n", + "38\n", + "39\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAk4AAAJBCAYAAACwDzogAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAUyklEQVR4nO3dT8ild3n/8c/1S2wW6iLiLyGdDDXIFBq7iGUIhUBJF21SN6MLy7iQFIS4SEChiyZudCml2p3CiKEpWNMBFbMobdMguDOZCUEzmaYOTWrGGTKIBW0XKYlXF8+dekyeyVwzz59zHny9YDjnfM99n+c7fOdm3pz7Puep7g4AAFf2/9Y9AQCAg0I4AQAMCScAgCHhBAAwJJwAAIaEEwDA0J6FU1XdW1UvVNW5qnpor34OAMB+qb34Hqequi7JvyX5oyTnkzyd5GPd/fyu/zAAgH1y/R697p1JznX3vydJVT2W5FiSbcOpqnwLJwCw337S3f//anbYq1N1h5K8vPL4/DL2f6rq/qo6VVWn9mgOAABv5z+udoe9esepthn7lXeVuvtEkhOJd5wAgINhr95xOp/k8MrjW5Nc2KOfBQCwL/YqnJ5OcqSqbquq30hyPMnje/SzAAD2xZ6cquvu16rqwST/lOS6JI9095m9+FkAAPtlT76O4Kon4RonAGD/ne7uo1ezg28OBwAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBg6Pqd7FxVLyX5eZLXk7zW3Uer6j1J/j7J+5K8lORPu/s/dzZNAID12413nP6wu+/o7qPL44eSPNndR5I8uTwGADjw9uJU3bEkjy73H03y4T34GQAA+26n4dRJ/rmqTlfV/cvYzd19MUmW25u227Gq7q+qU1V1aodzAADYFzu6xinJXd19oapuSvJEVf3rdMfuPpHkRJJUVe9wHgAAe25H7zh194Xl9lKSbyW5M8krVXVLkiy3l3Y6SQCATXDN4VRV76yqd79xP8kfJ3kuyeNJ7ls2uy/Jt3c6SQCATbCTU3U3J/lWVb3xOn/X3f9YVU8nOVlVn0jyoyQf3fk0AQDWr7rXf3mRa5wAgDU4vfJ1SiO+ORwAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgKErhlNVPVJVl6rquZWx91TVE1X1w+X2xpXnHq6qc1X1QlXds1cTBwDYb5N3nP4myb1vGnsoyZPdfSTJk8vjVNXtSY4n+cCyz5eq6rpdmy0AwBpdMZy
6+7tJfvqm4WNJHl3uP5rkwyvjj3X3q939YpJzSe7cpbkCAKzVtV7jdHN3X0yS5famZfxQkpdXtju/jL1FVd1fVaeq6tQ1zgEAYF9dv8uvV9uM9XYbdveJJCeSpKq23QYAYJNc6ztOr1TVLUmy3F5axs8nObyy3a1JLlz79AAANse1htPjSe5b7t+X5Nsr48er6oaqui3JkSRP7WyKAACb4Yqn6qrq60nuTvLeqjqf5LNJPp/kZFV9IsmPknw0Sbr7TFWdTPJ8kteSPNDdr+/R3AEA9lV1r//yItc4AQBrcLq7j17NDr45HABgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAoSuGU1U9UlWXquq5lbHPVdWPq+rZ5c+HVp57uKrOVdULVXXPXk0cAGC/Td5x+psk924z/tfdfcfy5x+SpKpuT3I8yQeWfb5UVdft1mQBANbpiuHU3d9N8tPh6x1L8lh3v9rdLyY5l+TOHcwPAGBj7OQapwer6vvLqbwbl7FDSV5e2eb8MvYWVXV/VZ2qqlM7mAMAwL651nD6cpL3J7kjycUkX1jGa5tte7sX6O4T3X20u49e4xwAAPbVNYVTd7/S3a939y+SfCW/PB13PsnhlU1vTXJhZ1MEANgM1xROVXXLysOPJHnjE3ePJzleVTdU1W1JjiR5amdTBADYDNdfaYOq+nqSu5O8t6rOJ/lskrur6o5snYZ7Kcknk6S7z1TVySTPJ3ktyQPd/freTB0AYH9V97aXIO3vJKrWPwkA4NfN6au91to3hwMADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMHTFcKqqw1X1nao6W1VnqupTy/h7quqJqvrhcnvjyj4PV9W5qnqhqu7Zy78AAMB+mbzj9FqSP+/u30ny+0keqKrbkzyU5MnuPpLkyeVxlueOJ/lAknuTfKmqrtuLyQMA7KcrhlN3X+zuZ5b7P09yNsmhJMeSPLps9miSDy/3jyV5rLtf7e4Xk5xLcuduTxwAYL9d1TVOVfW+JB9M8r0kN3f3xWQrrpLctGx2KMnLK7udX8be/Fr3V9Wpqjp19dMGANh/1083rKp3JflGkk9398+q6rKbbjPWbxnoPpHkxPLab3keAGDTjN5xqqp3ZCuavtbd31yGX6mqW5bnb0lyaRk/n+Twyu63JrmwO9MFAFifyafqKslXk5zt7i+uPPV4kvuW+/cl+fbK+PGquqGqbktyJMlTuzdlAID1mJyquyvJx5P8oKqeXcY+k+TzSU5W1SeS/CjJR5Oku89U1ckkz2frE3kPdPfruz5zAIB9Vt3rv7zINU4AwBqc7u6jV7ODbw4HABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ8IJAGBIOAEADAknAIAh4QQAMCScAACGhBMAwJBwAgAYEk4AAEPCCQBgSDgBAAwJJwCAIeEEADAknAAAhoQTAMCQcAIAGBJOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIaEEwDAkHACABgSTgAAQ1cMp6o6XFXfqaqzVXWmqj61jH+uqn5cVc8ufz60ss/DVXWuql6oqnv28i8AALBfrh9s81qSP+/uZ6rq3UlOV9UTy3N/3d1/tbpxVd2e5HiSDyT5zST/UlW/3d2v7+bEAQD22xXfcerui939zHL/50nOJjn0NrscS/JYd7/a3S8mOZfkzt2YLADAOl3VNU5V9b4kH0zyvWXowar6flU9UlU3LmOHkry8stv5vH1oAQAcCONwqqp3JflGkk9398+SfDnJ+5PckeRiki+8sek2u/c2r3d/VZ2qqlNXPWsAgDUYhVNVvSNb0fS17v5mknT3K939enf/IslX8svTceeTHF7Z/dYkF978mt19oruPdvfRnfwFAAD2y+RTdZXkq0nOdvcXV8ZvWdnsI0meW+4/nuR4Vd1QVbclOZLkqd2bMgDAekw+VXdXko8n+UFVPbuMfSbJx6rqjmydhnspySeTpLvPVNXJJM
9n6xN5Dww+UfeTJP+93HIwvDfW6yCxXgeL9TpYrNfBsrpev3W1O1f3Wy4/WouqOuW03cFhvQ4W63WwWK+DxXodLDtdL98cDgAwJJwAAIY2KZxOrHsCXBXrdbBYr4PFeh0s1utg2dF6bcw1TgAAm26T3nECANhowgkAYGgjwqmq7q2qF6rqXFU9tO758FZV9VJV/aCqnn3j1+RU1Xuq6omq+uFye+OVXoe9sfy+yEtV9dzK2GXXp6oeXo63F6rqnvXM+tfXZdbrc1X14+UYe7aqPrTynPVak6o6XFXfqaqzVXWmqj61jDu+NtDbrNeuHV9rv8apqq5L8m9J/ihbv67l6SQf6+7n1zoxfkVVvZTkaHf/ZGXsL5P8tLs/vwTvjd39F+ua46+zqvqDJP+V5G+7+3eXsW3Xp6puT/L1bP2apN9M8i9JfnvwRbXsksus1+eS/Fd3/9WbtrVea7T8loxbuvuZqnp3ktNJPpzkz+L42jhvs15/ml06vjbhHac7k5zr7n/v7v9J8liSY2ueEzPHkjy63H80W/84WYPu/m6Sn75p+HLrcyzJY939ane/mORcfvm7JtkHl1mvy7Fea9TdF7v7meX+z5OcTXIojq+N9DbrdTlXvV6bEE6Hkry88vh83v4vyXp0kn+uqtNVdf8ydnN3X0y2/rEmuWlts2M7l1sfx9zmerCqvr+cynvj1I/12hBV9b4kH0zyvTi+Nt6b1ivZpeNrE8KpthnzHQmb567u/r0kf5LkgeVUAweTY24zfTnJ+5PckeRiki8s49ZrA1TVu5J8I8mnu/tnb7fpNmPWa59ts167dnxtQjidT3J45fGtSS6saS5cRndfWG4vJflWtt7KfGU5n/zGeeVL65sh27jc+jjmNlB3v9Ldr3f3L5J8Jb88XWC91qyq3pGt/4S/1t3fXIYdXxtqu/XazeNrE8Lp6SRHquq2qvqNJMeTPL7mObGiqt65XGSXqnpnkj9O8ly21um+ZbP7knx7PTPkMi63Po8nOV5VN1TVbUmOJHlqDfNjxRv/CS8+kq1jLLFea1VVleSrSc529xdXnnJ8baDLrdduHl/X7+6Ur153v1ZVDyb5pyTXJXmku8+seVr8qpuTfGvr32OuT/J33f2PVfV0kpNV9YkkP0ry0TXO8ddaVX09yd1J3ltV55N8Nsnns836dPeZqjqZ5PkkryV5wCd+9tdl1uvuqrojW6cJXkryycR6bYC7knw8yQ+q6tll7DNxfG2qy63Xx3br+Fr71xEAABwUm3CqDgDgQBBOAABDwgkAYEg4AQAMCScAgCHhBAAwJJwAAIb+F/eiKo2m6X3aAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "[pillars, voxels], \\\n", + "[occupancy, position, size, angle, heading, classification], \\\n", + "[pts_input, gt_boxes3d, sample] = validation_gen[9]\n", + "# print(pillars[0].shape)\n", + "# print(voxels[0].shape)\n", + "# print(occupancy[0].shape)\n", + "# # print(occupancy[0,0,:])\n", + "# print(angle[0].shape)\n", + "\n", + "# for i in range(len(occupancy)):\n", + "# print(i, \"l \")\n", + "# print(np.sum(occupancy[i][:,:,0] == 0))\n", + "# print(np.sum(occupancy[i][:,:,0] == 1))\n", + "# print(np.sum(occupancy[i][:,:,0] == -1))\n", + "# print(np.sum(occupancy[i][:,:,1] == 0))\n", + "# print(np.sum(occupancy[i][:,:,1] == 1))\n", + "# print(np.sum(occupancy[i][:,:,1] == -1))\n", + "# print(np.sum(occupancy[i][:,:,2] == 0))\n", + "# print(np.sum(occupancy[i][:,:,2] == 1))\n", + "# print(np.sum(occupancy[i][:,:,2] == -1))\n", + "# print(np.sum(occupancy[i][:,:,3] == 0))\n", + "# print(np.sum(occupancy[i][:,:,3] == 1))\n", + "# print(np.sum(occupancy[i][:,:,3] == -1))\n", + "\n", + "\n", + "fig = plt.figure(figsize=(10,10)) \n", + " \n", + "ax = fig.add_subplot(111) \n", + "ax.imshow(occupancy[0][:,:,3] == 1, cmap = plt.cm.gray, \n", + " interpolation ='nearest') " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING: /home/tan/tjtanaa/PointPillars/visualization/custom_processor already exists, operation skipped.\n", + "WARNING: /home/tan/tjtanaa/PointPillars/visualization/custom_processor/data already exists, operation skipped.\n", + "WARNING: /home/tan/tjtanaa/PointPillars/visualization/custom_processor/html already exists, operation skipped.\n", + "32\n", + "33\n", + "34\n", + "35\n" + ] + } + ], + "source": [ + "\n", + "import numba\n", + "\n", + "[pillars, voxels], \\\n", + "[occupancy, position, size, angle, heading, classification], \\\n", + "[pts_input, gt_boxes3d, sample] = validation_gen[8]\n", + "\n", + "\n", + "@numba.jit(nopython=True)\n", + "def _points_to_bevmap_reverse_kernel(\n", + " points,\n", + " voxel_size,\n", + " coors_range,\n", + " coor_to_voxelidx,\n", + " # coors_2d,\n", + " bev_map,\n", + " height_lowers,\n", + " # density_norm_num=16,\n", + " with_reflectivity=False,\n", + " max_voxels=40000):\n", + " # put all computations to one loop.\n", + " # we shouldn't create large array in main jit code, otherwise\n", + " # reduce performance\n", + " N = points.shape[0]\n", + " ndim = 3\n", + " ndim_minus_1 = ndim - 1\n", + " grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size\n", + " # np.round(grid_size)\n", + " # grid_size = np.round(grid_size).astype(np.int64)(np.int32)\n", + " grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)\n", + " height_slice_size = voxel_size[-1]\n", + " coor = np.zeros(shape=(3, ), dtype=np.int32) # DHW\n", + " voxel_num = 0\n", + " failed = False\n", + " for i in range(N):\n", + " failed = False\n", + " for j in range(ndim):\n", + " c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j])\n", + " if c < 0 or c >= grid_size[j]:\n", + " failed = True\n", + " break\n", + " coor[ndim_minus_1 - j] = c\n", + " if failed:\n", + " continue\n", + " voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]]\n", + " if voxelidx == -1:\n", + " voxelidx = voxel_num\n", + " if voxel_num >= max_voxels:\n", + " break\n", + " voxel_num += 1\n", + " coor_to_voxelidx[coor[0], coor[1], coor[2]] = 
voxelidx\n", + " # coors_2d[voxelidx] = coor[1:]\n", + " bev_map[-1, coor[1], coor[2]] += 1\n", + " height_norm = bev_map[coor[0], coor[1], coor[2]]\n", + " incomimg_height_norm = (\n", + " points[i, 2] - height_lowers[coor[0]]) / height_slice_size\n", + " if incomimg_height_norm > height_norm:\n", + " bev_map[coor[0], coor[1], coor[2]] = incomimg_height_norm\n", + " if with_reflectivity:\n", + " bev_map[-2, coor[1], coor[2]] = points[i, 3]\n", + " # return voxel_num\n", + "\n", + "def points_to_bev(points,\n", + " voxel_size,\n", + " coors_range,\n", + " with_reflectivity=False,\n", + " density_norm_num=16,\n", + " max_voxels=40000):\n", + " \"\"\"convert kitti points(N, 4) to a bev map. return [C, H, W] map.\n", + " this function based on algorithm in points_to_voxel.\n", + " takes 5ms in a reduced pointcloud with voxel_size=[0.1, 0.1, 0.8]\n", + "\n", + " Args:\n", + " points: [N, ndim] float tensor. points[:, :3] contain xyz points and\n", + " points[:, 3] contain reflectivity.\n", + " voxel_size: [3] list/tuple or array, float. xyz, indicate voxel size\n", + " coors_range: [6] list/tuple or array, float. indicate voxel range.\n", + " format: xyzxyz, minmax\n", + " with_reflectivity: bool. if True, will add a intensity map to bev map.\n", + " Returns:\n", + " bev_map: [num_height_maps + 1(2), H, W] float tensor. \n", + " `WARNING`: bev_map[-1] is num_points map, NOT density map, \n", + " because calculate density map need more time in cpu rather than gpu. \n", + " if with_reflectivity is True, bev_map[-2] is intensity map. \n", + " \"\"\"\n", + " if not isinstance(voxel_size, np.ndarray):\n", + " voxel_size = np.array(voxel_size, dtype=points.dtype)\n", + " if not isinstance(coors_range, np.ndarray):\n", + " coors_range = np.array(coors_range, dtype=points.dtype)\n", + " voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size\n", + " voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist())\n", + " voxelmap_shape = voxelmap_shape[::-1] # DHW format\n", + " coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32)\n", + " # coors_2d = np.zeros(shape=(max_voxels, 2), dtype=np.int32)\n", + " bev_map_shape = list(voxelmap_shape)\n", + " bev_map_shape[0] += 1\n", + " height_lowers = np.linspace(\n", + " coors_range[2], coors_range[5], voxelmap_shape[0], endpoint=False)\n", + " if with_reflectivity:\n", + " bev_map_shape[0] += 1\n", + " bev_map = np.zeros(shape=bev_map_shape, dtype=points.dtype)\n", + " _points_to_bevmap_reverse_kernel(points, voxel_size, coors_range,\n", + " coor_to_voxelidx, bev_map, height_lowers,\n", + " with_reflectivity, max_voxels)\n", + " # print(voxel_num)\n", + " return bev_map\n", + "\n", + "def point_to_vis_bev(points,\n", + " voxel_size=None,\n", + " coors_range=None,\n", + " max_voxels=80000):\n", + " if voxel_size is None:\n", + " voxel_size = [0.1, 0.1, 0.1]\n", + " if coors_range is None:\n", + " coors_range = [-50, -50, -3, 50, 50, 1]\n", + " voxel_size[2] = coors_range[5] - coors_range[2]\n", + " bev_map = points_to_bev(\n", + " points, voxel_size, coors_range, max_voxels=max_voxels)\n", + " height_map = (bev_map[0] * 255).astype(np.uint8)\n", + " return cv2.cvtColor(height_map, cv2.COLOR_GRAY2RGB)\n", + "\n", + "img = point_to_vis_bev(pts_input[0], voxel_size=[params.x_step/2, params.y_step/2, 0.01],\n", + " coors_range=[params.x_min, params.y_min+20.32, params.z_min, \n", + " params.x_max-40.32, params.y_max-20.32, params.z_max] )\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + 
"outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlIAAAJCCAYAAAALCSnoAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOzdeXRc5X0//vdz7+yj2SSNdsmSbEm2bGHZBtksJmxmCWDiBIfklJMvBErSEJJfNtqmJ7Rp+z35Nk3PKWkCSWhCSEhCk6ZpGhIwayDY2GCD8SJbeJFkW/uu2dfn94c8txpLtqTRjEYjv1/n3AOauffOM+NZPvdZPh8hpQQRERERzZ2S7QYQERER5SoGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpylggJYS4WQjRJoQ4LoT4q0w9DhEREVG2iEzkkRJCqADeB7AFwBkAbwP4uJSyNe0PRkRERJQlmeqRagFwXEp5UkoZBvAMgDsy9FhEREREWaHL0HnLAZye9PcZABvPt7MQgunViYjoomUymRAKhcBqI1k3KKV0z+WATPVIiWluS3p3CCEeEELsFULszVAbiIiIFh1VVVFUVJR0G4OoRaNzrgdkqkfqDIDKSX9XAOievIOU8gcAfgCwR4qIiC4uQiT3NzCIyl2Z6pF6G0CdEKJGCGEA8DEA/5OhxyIiIsoZsVgMQ0ND2W4GpUlGeqSklFEhxGcB7ACgAviRlPJwJh6LiIgo10Sj0Ww3gdIkI+kP5twIDu0REdESpqoqFEVBJBJJut1oNCIUCmWpVTSNfVLKS+dyADObExERLYDpOi4WQ2cGzU+mJpsTERHRWbFYbNrbw+HwAreE0o09UkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCIGUkREREQpYiBFRERElCJdthtAREuP3W7HlVdeCZ/Ph9dffz3bzSEiyhgGUkSUdjqdDoWFhXA6ndluChFRRnFoj4jSyuFwoKGhAR6PB88//zwAYP369RBCZLllRETpx0CKiNLK7/ejq6sLgUAAIyMjAICOjg5IKbPcMiKi9GMgRURpZTKZUFxcDKPRqN02PDycxRYREWUO50gRUVpZrVZUVVVBUXidRkRL34zfdEKIHwkh+oUQhybdli+EeFEIcezsf12T7vtrIcRxIUSbEOKmTDWciBafvLw82Gw29PT04PTp09luDtGiV1ZWlu0m0DzN5pLxxwBuPue2vwLwspSyDsDLZ/+GEKIRwMcArD57zGNCCDVtrSWiRctoNGLZsmVwu93YtWsXdu/ene0mES16y5cv50KMHDdjICWlfB3AuRMc7gDw1Nn/fwrAhybd/oyUMiSlbAdwHEBLmtpKRItYUVERiouLs90Mopyyd+9eLsTIcalOYiiWUvYAwNn/Fp29vRzA5P78M2dvm0II8YAQYq8QYm+KbSCiReTMmTPo7OxELBbLdlOIcoIQAlu2bGGPVI5L92Tz6d4N04baUsofAPgBAAghGI4T5bjy8nKEw2Hs2bMn200hIlowqfZI9QkhSgHg7H/7z95+BkDlpP0qAHSn3jwiyhVnzpyBqqq45ZZb0NLSAr1ej+bm5mw3i2jRklLC5/Nluxk0T6kGUv8D4P+c/f//A+C3k27/mBDCKISoAVAH4K35NZGIckkkEoHH40EkEsF7772X7eYQLVpCCFit1mw3g+ZpxqE9IcQvAFwDoFAIcQbA3wL4fwB+KYS4D8ApANsBQEp5WAjxSwCtAKIAHpRScsIE0UUiGo0iGAxCp9PhkksuQTAYxPvvv5/tZhEtOjqdDjfccAN6e3s52TzHzRhISSk/fp67rj/P/v8XwP+dT6OIKDf19vYiHo+jrKwMBw8e5A8E0QXEYjHE4/FsN4PmiZnNiSgtSktLUVJSArPZDCklysrK0NHRgbq6Oni9XvT09GS7iUSLgslkwubNm2EwGNDW1pbt5tA8MZAiWmLKy8sRiUQwODi4oFe7Ho8HAGCz2WC32zEyMgIpJYaGhhAOhxesHUSLnU6nQ3FxMcbGxrTPjdFoRDgcZi9uDmIgRbTEuFwuBAIBDA0NLejjer1eeL1ejI6OIj8/H1JKCCFgNBqh0+ng9XoXtD1Es1FWVoZYLIa+vr6stkNKySAqRzGQIlrkTCYTAEBRFASDwaReJr1eD5vNhlgshrGxMQATPUJWqxXxeBxdXV0L2htkNpuRl5enTTgHAIPBgGg0umBtIJoLo9G44O/PeDwOv98Pv98PYKKHir22uYuBFFGWqepEOcpYLAZVVWGz2SCl1AIjo9EIIQT0ej0ikUhSIKXT6VBQUIBQKKTtrygKli1bhvLycgghMD4+DiEEgsGgNoyQCWazGWVlZdqQxeHDhwEAnZ2dGXtMovlwOp3o7e1FIBBYsMfU6/VwuVzo6OjQPst6vZ4XGzmMgRRRGthsNvj9fiiKAkWZSM+W6KqPRCJT9k2QUsJqtUJKCb/fD6vVioaGBvh8P
uzbtw8AtABpOoFAAMeOHUu6befOnbj55psRi8WwcuVKbYito6MDra2t6XrKUxQVFWlBHVciUS6or69Hb28vTp06BZ1OB4PBoPUSZUpeXh4aGxuxe/duVFZO5K8OBoMZfUzKLLEYxmRZIoZyldlsBgB8+MMfxhtvvAEhBOx2O+LxOGKxGEKhEE6ePJl0zLZt2wBAC7ImBx3RaBT9/f1pTWRZX1+P8fFx9Pb2pu2c51JVFUIIxONxBlGUk5YtW4a6ujq89NJLGX8ss9mMa6+9Fl1dXXjvvfdgtVqZ4Xzx2CelvHQuBzCQIpqHhx9+GKqqIhwOIxwOY8eOHRdlAsqmpia4XC6cPn0a7e3tEEJw4izllKqqKtTV1eHll1/O+GMZDAasXr0a7777bsYfi+aMgRQRLbympiaMjo5ieHgYxcXFcDgc8Pv9zJFDNA2TyYQrrrgC3d3dOHr0aLabQ8nmHEhxjhQRzZvP50NZWRn0ej1OnjwJvV6PoqIiVFdXo6OjI9vNI7qgxsZGOBwOvPnmmxl/rMLCQmzZsgWjo6MMopYI9kgR0bwJIbThvMR3ihACADjER4ueoigQQiAWy2xp2MrKSjQ2NuLdd99FRUUF3nnnnYw+HqWEPVJES5ndbkcoFNJWBy6WCarTJRNkAEW5YqEWSHR3d2N4eBgGg2HRfHZp/pRsN4CIZs/n80Gv10MIsaC5b4ho/txuN9auXatl/KelgT1SRDkkFoshGAxCSsk0A0Q5JpGGpLy8HKdOncp2cyhN2CNFlGOi0WjG53IQUfr5/X4MDAxkpSwNZQ4DKaIlzO12a7X6iCj7Er3KFosFAFBRUZHlFtF8MZAiWsKqq6uTStIQUXbk5eXB5XIBmAimEr3KBQUF2WwWpQEDKaIlLBwOJw0D6nQ67UqYaCnT6XSwWq3ZboamqKgIK1asgNVqhRBCq+mXznJQlB0MpIiWsNbWVoyOjgKYyOtUUFCAVatWZblVRJmXn5+PDRs2ZLsZmlOnTqG3txf19fUYHBzkqtslhIEU0RKkKBMf7fXr18PtdkNRFBQWFsJkMmHfvn1Zbh1R5qmqCoPBkO1maOrr61FdXY39+/cjLy8v282hNGJmc6Il6Oqrr8aePXsQCoWwbt06FBcXQ1VVDAwM4K233sp284guOo2NjVizZg26urqwc+fObDeHzo+ZzYlowuWXX459+/ZBr9fj/fffx8mTJ2d1nNvths/n0+ZwEOWStWvXYvny5Th16hT27t2b7eYkiUajCAaD2W4GpRl7pIiWmFtvvRU6nQ4ejweKosBkMqG1tRUejwd5eXlob2/PdhOJMma6uo+LgRACxcXFKC8v5/D64sYeKaKL3UsvvQTgf2vdrV+/HgAwNDSE4eHhCx5bVFSE0dFRhMPhzDaSKEOklCguLobL5cKRI0ey3RyNlBJ+vx9DQ0PZbgqlGQMpoiUmFAol/R0Oh7F8+XIoioLjx49rtwshUF5eDmCiht/o6Ciqq6tx7NgxBlKUsyorK1FTU7MoiwJ7vV6u1luCOLRHtMTZ7XYYjUYEAgEEg0GYTCZ4vV4AgMViQUlJCc6cOYNwOAyXywWv14tIJJLlVhOlxmKxwGw2IxqNYmxsLNvNodzDoT0iSjY+Pq79v6IoiEQiEELA4XBgdHQUg4ODsNvtGBoaQnFxMSKRCAMpyll+v58LJWhBMY8U0UUkHo9rQ39SSjidToyPjyMejwOYWFW0GHqpiWZSW1ub7SbMid1uR2FhYbabQRnAQIroIiSlxPj4ONxuNwBgeHgYUkocP34843NLysrKWEiZUqKqKoqLiwFMBCa5xOVyoaSkJNvNoAxgIEW0BOl0sxu1j0ajGW7JVDU1NYuqBhpNb7bvoYVgNBpht9uh1+tRVVUFADh9+nSWWzU3wWCQQ45LFAMpohylqiqMRuO097lcLiiKopWKURQFqqpCCKHtI6W8YE4pIYR2fDrt3LmTS8AXOVVVMxbspvK+Ki4uxvr16xEKhXDgwAEIIbBp06aMtC9TBgYG0NnZme1mUAYwkCLKIYqiQKfTQVEUlJaWYu3atdPul5jzZLfbYTAYYLPZ4Ha7YbPZLnj+RKAlhIDNZoPD4UipnZMDNsoNOp1O64WKxWLaird0/1taLJY5v68ikQi8Xi9MJhNaWlogpcTvf//7tLYr0xwOhzaUTkvL4um7JaIZNTU1we12o729HSdOnMCZM2em3S/R4zM6OgpgIpfUbJaCO51OeL1e5OXlIRaLYWRkZM5tNJlMsNvt6O/vn/OxlD2JYV6DwQCz2ay9X/Lz8zE0NASj0TglR9lc2e12CCHm/L7q6elBT08PLBbLeXthF7tUPkuUG9gjRZRD9Ho9FEVBUVGRlkwzcfvVV1897TG1tbVwu91wu90zrnQqKCiAwWDAyMiIljbBaDRqE3xnIxgMMojKYecG3YmgfL5BFDCRimOuuZ0KCgpQVlYGYCK1QSJzP9FiwR4pohwSCASSJojn5+djw4YNiEQieP3117F69WocPnw46ZihoSGEw2EEAgEMDAxMe97CwkKthMy5Wc1DoRD6+vrS/2Qop+l0OkgpEYvFkm43GAxpzYzP+XS02LFHiiiHmEwmSCnR09MDRVGwYsUKvPrqq9i1axfy8/Nx9OhRVFdXJ81rGR8fn7EsxdDQEKSUGBkZYTLOi9TkOVKzEY1GpwRRAGYMooxG44wT2RsaGpCXlzftfYqioK6ubtbtnK2SkhJcdtllaT8vLX0MpIhyxIoVKyClhBAC9fX1WLVqFSwWC1wuF8LhMEZHR1FWVoaurq6kpJqzSbCZ2IfJOC9eQoiMLxJQFAWxWGzGNADt7e1J++Tl5WnDe/F4HB0dHdMeV1FRgYKCgmkf94Mf/OAFH3NgYAAHDx6c4RkQTcWhPVrSCgoKMDIyoq1iy2VWqxUGgwEejwcGgwE+nw/d3d2w2Wyora1Fe3s79Ho93G43otEoBgcHl8TzpoUx25xier0e0WgUiqJASjnr95iqqtr+iYBqsubmZvj9fpw4cWJKr1YgEEjqKT1fr+ng4OC0vWTxeBxvv/32BdsXi8WmPZZoJuyRoiXN7/cviV6WyspKmM1m9PT0IBQKQa/XIxQKYXBwEAMDAzh9+jR8Pp/2Q+Dz+ZbE86aFI6Wc1XsmFotpAdFc3mPxeBx6vR56vR5SShgMhqQ0CEajEUNDQ1i1ahWMRiPy8vJgMBi0x5zNZPdgMHjeIOt88wOJ5ouBFC1pgUBgSQQUbrcb/f396O3tRTweh8fjQSQSQXl5OTweD7q7u+Hz+bQl1kslgKTFJ9EDNdvAKyExMV0IAb1eDyEEzGazdn9nZydKSkpQVlaGVatWwWw2a49lNBpTzmmmKIpW466+vp45zijtGEhRTpjLF6DFYkn671Kgqiq6u7u1q/LED0ziBylRu258fBzBYHBRBlHl5eUZyZROC2e+/36JotiJ4cFAIKCds7e3F8uXL8fIyAh0Oh1CoRBsNhvsdruWiDZhcgA2G4ljc6HG4+bNm1FRUZHtZtAc8FuNcsLy5ctnDKSEELBYLNpkU4vF
sqSuPh0Oh/aDUFRUBJfLhfHxcbhcrqQkhcPDw9lq4gWVlpYykMpxqqrO+xzxeFybjzQ2NpZ0zv7+fhw7dgzvvPMOxsfH4XA4UFxcDFVVtTQI5/ZkOZ3OC76v4vE4ent7AQAHDhyAw+FYtN8Lzc3N2LJlizaxnnIDv9UoJzz33HMzTmpVVRUlJSVasDE4OLgoe2bmymg0QgiBqqoqmM1mSCkRjUbhdDpx6aWXoqqqSktyaDQaodPp0vKDl2579+7NSpFkSp90pMaIRCJJ6Tgmf0ZbW1uThuM7OjoQDAaTks9KKZMuFpYvXw69Xj/tYwkhpmRCX7FixaL8fADAAw88gGeeeQZvvfVWtptCcyAWww+NECL7jaCck/gyTEyw1ul0KC8vh5QSp06dymbT0qqhoQFOpxOHDx9GIBBAXV0dKisrtcm5b7/9NqLRKIQQaGhogM/nQzAYxNDQ0JxW7amqylVLlFaKoszqPajX6xGPxxGPx1FTU4P+/v60LJgwmUyora1Fa2vrvM5DF5V9UspL53IAAynKCdN9Ia9ZswbhcBjvv/9+llq1cFauXAmHw4Hx8XHk5eXBZrNpPW9erxe7du3S9i0qKsLAwMCcf4QaGhrQ3t6e1qzUdHErLS1Ff3//rAL0hoYG9Pf3Y3R0FNdffz0OHjyY9D4+9/082yCNaI7mHEhxaI8WDafTCZfLNe19t99++5R5EIcOHboogqiEcDgMu90Ol8sFh8MBu90+7YT6/v7+lK7k29raGERR1rS1tWFkZARSSrz00ktoaGjAhz/8Ydxyyy1oaGhI2lcIgY985COLdq4TXVzYI0WLwhVXXIHh4WEcPXr0vPvcdtttePHFF9NSPDVXNTY2wuFwwGKxwGg0oq+vD/v27ZvxuES5Da/Xm+kmEs3IZDLBZrOdN7fTVVddhaNHj2JwcHCBW0bEHinKQZs3b0ZVVdWMK7qee+65pCCqpqYGLS0tWL16Ndxu95we0+12Y/PmzSm1N5sikQjMZjPy8vJgNptnneLB6/UyiKJFo7CwcEpdu+bmZq2HadeuXQyiKGcwkKKsqqqqgt1uT1pFs2LFCtx3330wGo246aabtNsnz7Ooq6tDfn4+IpEI/H7/nCvEJzIzGwwGXHfddfN/IgtMCAGPx4NAIIANGzZMuf/KK6+E0WjE+vXrp609dq4bbrgBer0+KcUC0WxZrVbU19drf0+XPFNVVe1iqaenB6+88goMBgNaWloAAIcPH8bmzZu1iecbN25EZWXlwjwBonlgIEVZUV9fj2XLlmFoaAjBYBChUAgrVqzAzTffjKKiIjz77LMIh8PYs2fPtMerqop4PI6BgQF4PJ6k5dGzMTo6ivfeew+RSGRWQ2OLQX19PRwOByKRCAYHB+HxeGAymWCxWLBq1aqkfQ8cOIBwOIy2tjYtNUKCyWTC8uXLk25LrPzzer1cuUdzJoRICsC9Xi/cbnfSHKbJ/x+LxbRyLkePHkV1dTUikQhMJpO2XyKVB02vsrJy2jQOFosFJSUlWWjRxYuBFC24xHL+kZER+P1++Hw+eL1eRKNRjI6Oor+/H0VFRZBSYnR0dNpzSCkxNDSEgYEBjI2NzTkJZSJokFJOCTQWq76+Puh0OvT19Wk192KxGAwGA3p6eiCEQGNjIwDA4/GgtLQURUVFWr2yhEgkMmVuytjYmFbCYzHMm6TcZjab4fF4ZkyWKaWE1+vVfvj379+fllxVF4Ph4eFpVy2Gw+Gc+U5bKhju04ISQsDtdiMej6OgoAA2mw0AEAqFtDwyer1+xi+C/v5+rScLOH+iQLvdDq/Xm9PLpJuamjA6Ogqr1QqTyQSDwYDBwUEt83Ntba0WcE5+3UwmEwKBwJQkmLFYDOPj4wv6HGhpC4VCWvZwYOJCJRgMQlVVLTCfrqdTp9Nh+fLlOHPmDICJz3XCiRMn4PF4Mtzy3OXz+aa9PRqNMvHtAmOPFC24xBdraWkpdDqd9qULTMytMBqNMybUHBkZScqOPB2r1boklkcLIVBbW4tly5ZBURTY7XZYrVbtdevs7ISiKCgrK0NXV5d23MjICAYGBmaV0qC6uho1NTXnzRBNdCGJbPsJqqpCVVWttl5in8m9nQaDAVVVVXC5XFogNVlXV1fGAv7S0lK+1ylt2CNFCy4SiUBRFIyNjaG7uxvxeBwulwsGgwFCCAghkJeXh3g8Dr/fP2W8PzEE5fF4IISAqqrTXp2ZTCYMDw/n/FDVgQMHcNlll2F8fBx6vR6KosBqtcJoNEJRFHR1dcFkMsHpdKK7uxtmsxmBQAAjIyOzfozCwkIIIdDT08OhFZozVVVhNpu1wMdms2lBf29vb1JvlMlkgl6vh5QS5eXlSb1OpaWl6OvrS+pBzsvLg06nO+8wfyocDgdGRkb4Xqe0YCBFC0pKqQU9bW1tKCgoQG9vrxYcOZ1OFBUVaZXhT548OSUZXywWg6IoOHbsGGKxGEwm07SB1FxX8i1mb7/9NkwmE1auXKlldDaZTDAajaioqEBvby+OHz8OYGI4MxaLzSm55t69ezPVdLoIhEIh9PX1aX+Hw2GYTCa4XC54PJ6k4XWbzQaHw4Guri709vaitrZWG7LeuHEj/vCHPyS9dwsLC2G1WtMaSF0oXx3RXDEhJ2XVPffcg9dffx2nTp1CNBpFdXU1mpqaEA6H0dnZqQUH547519XVYXR09LwJ/ZYag8GA+vp6VFZWwuPxwGAwQFEUHD9+HKdPn0ZhYWHSkGiiNEdieCWX54hR7nE6nRgdHUVxcTEKCgpw+vRpbXGHEAJ6vR4mkwnARKqOzs5OreD4q6++yp4iyiYm5KTc8uMf/xjr169HdXU1ACAej2uTJRMTUWtra6ccd+zYsSlBlKIoUFVVGx5cSjZu3Ii8vDwEg0E4nU7thyoROMVisaR5ZT09PYjFYli7du2s8kgRpVOi92h4eBitra0oKSmB3W4HMLE8P7HIZNWqVdixYwf0ej1aW1u1uZJEuYQ9UrSoVFRUoK6uDl6vF6OjoygpKUFHRwdOnz59weMsFgvq6+tRWlqKEydOQEqJY8eOLVCrF86mTZsQDodhNpvhdrvR09Nz3lxbRIuR1WpFWVkZ3G43+vv7tV5nANiyZQtee+01OBwOjI2NsfYjZQN7pGhxUxQF3/jGN7Qeo/vuuw+lpaXa/fF4HFJKKIqCkydPJq1CO1dlZSWam5vR3NyM8vJy7N+/Hzt27IDBYFiSQVRdXR0MBgPKy8tRXFwMn88HVVVx4403QlEULFu2DLfeemu2m0l0QdFoFEIImM1mHD9+HCaTSatg8OKLL8JsNmNoaIhBFOUMBlK0YEwmE772ta8l3fbkk0+ip6dH+zuxpN9gMGDt2rVob28/b2+UwWCAxWKB3+/Xrmrj8TgOHz6cuSeRRVarFXa7HUII+P1+xGIxGI1GLQdXZ2cnnnvuOQBAeXk5ampqpiTjJFoozc3NWjJOm82mpRuoqKhAQUEBXnnlFQBAMBjEiy++qB03NjbGOX0XmerqahiNxqTbrFZrzpQI4qo9WhAulwv33nsvvvWtbwH
431xS535hVlVVaVenXq8X77zzzpRzrV27Fnq9HqqqQq/Xw2AwwGQyoaysTBvWW4oOHTqEWCymLS1XFAWRSATDw8PQ6/W4/PLL0d/fj6NHj2qZzlnuhbLl0KFDqKmpgdlsxsmTJ1FbWwur1YqBgQHs3bsXqqrC5XJhYGBgxsCpsLAQxcXFS/Yi6WJ36tQp3H777XjvvffQ0dEBYKKw/IYNG2ac1rEYsEeK5q2srAyVlZUoKys7b20sRVGQn5+PT37yk/D5fHj44Yen7S05fPgwfvazn+GNN97Arl27pj2XXq+HxWJBXl4epJQwGAxobGxMGiJcitxuNzo7O7Vhj0AgACEECgoK0NLSgvz8fESjUWzatAnLly9fchPuKbdEo1GcPn0ax48fRyAQgN1uRzAYRDQaxdq1a3HVVVchFAph06ZNACbSdlxxxRXaar6E0tJSuFwunDhxAsDEd8nKlSvn3b6amhpYrdZ5n4fmLx6P449//CO6u7u127q6uvDSSy8BmMhT9md/9mcAgE996lPae+T666/HihUrFr7B5+Bkc5o3k8mE8vJy9Pb2wu/3T+kRKiwsxM0336xVh+/u7sYf//hHvP/++9Neiep0OrhcLpSVlcHj8aEClM8AACAASURBVODkyZNJ99vtdjQ0NCA/Px/ARFmJ9vZ2CCHmlIQy15hMJlitVi2LtNvtRlVVFQKBAFwuF6SUCAaDOHjwIEKhEHw+35LtnaPFrbKyMqknYfny5XA4HMjLy8Po6ChOnz6tJZUdHR3Vsp4n3s8dHR3aHCmTyYSCggIYjUbtu8Bms827fIzVakV+fj7GxsZYMmmRE0KgsrISp06dQm1tLTo6OhCPx1FUVAS/3w+v15vOh+Nkc1p4wWAQ/f390wZRABAIBLRAR1EU1NTUoKamBrfddhu2bdumBUQJifQF4XAYVqsVxcXFSfePj4/j5MmTOHz4MFpbWzE+Po7KykoUFRVl9HlmWzAYhN/vR2FhISorK5Gfn6/9OAWDQS3Le35+Pnw+H4qKilgGg7Li3CBndHQUnZ2dWgLdkZER9PT0YHh4GCMjI1i+fDlsNhs6OzsRiUQgpURRURGsVqv2vldV9bznn636+nqtp9bn82FsbEyr10mLl5RSS+9y8uRJ7QK8v78/3UFUShhIUVp4PJ4pQVRJSQk2bNiAiooKHD16FEIIrTelpaVF6zU5dx5PIst5OBxGOByedp7P0NAQzpw5g9OnT6O/v1/bd6kLBAIIBAIIh8OQUsJoNMLhcMBkMsFkMiEejyM/P18rs5P48bFarbBarVBVdcrQSbo4nU64XK6MnJtyy7lZyIeGhjA0NIRQKISSkhLU19cjFothZGQEQgjk5+cjEAhgdHRUSzabmA/odruhKEpSQeNUnZvoc3x8nIEUzRsnm9OsORwOWCyWpFV2F1JRUYHVq1ejtbUVg4ODiEQiOHTokPbj/vLLL09bpVxKiVAoBK/Xq12RXojH40FbW9vcn1COSgyZuN1u6HQ6GAwGVFZWQkqJSCSCaDSKqqoqOBwOxGIxBAIBWK1W+P1+hEKhpCv7dErUSiQ6n3g8juLiYtjtdoTDYfT29iIUCqGnpwc6nU4rf+R2uzE4OIjS0lIYjUaMjY1Ne77Exdlstbe3p+upEGkYSNGsVVVVoaKiYsZAKlGuRFEUdHR0oKOjA5FIBMeOHcOvf/1rlJWVAcB5V5SpqgqDwYCBgQGoqgq3270oum8Xm4GBAQwMDMBqtcJisQCYmDui0+lQUlICKSXKysrg8/ng8XgQj8dhNBqnrUuYDunoMaClbWxsDENDQ6ioqMA111yD3/72twgGgzhw4ABaWlowOjqKY8eOoampCT6fD/F4HB6PB7FYDMXFxRgdHU363kj0XCW4XC7k5eXB5/NheHg4G0/xoiOEQEVFRU6srssUDu3RrBiNRuh0OsTj8aSVLomyD5Nddtll+MhHPgKDwYCuri5s2rQJXq8Xzz77LIQQuOGGG3DnnXeed/5OOBzG8PAw4vE4uru7WTZiBj6fD88//zx27NiBwcFBDAwMaIFTNBqFlBKqqqKsrAwrVqyAEEIrEpsYEgSg/Xvo9XooigKdTgeTycR5VjRviqJACIETJ05g7969GB4enjI38q233oJer4cQAgcPHsTIyAi6urrg9XoRCoXQ1tYGk8kEIQRsNhucTueU3EOXXHIJbrvtNmzatAl5eXnTtsXpdM6559Rut2s5sRIMBsNFuepPp9Mlve46nQ533XVXFluUfQykaEY6nQ6XXXYZbDYbTpw4gVtuuUW7/c4775yy/3PPPYejR49CVVWcOnUKzz//PGw2G+666y5IKfHMM88gGAzOukt+tkOJFzspJV599VXs3LlTy82TCKT0er0WINntdjQ3N6O+vh4tLS3YuHEjhBBYsWIFjEYjamtrUVhYiPLycqxfv15bbUmUKpfLpaU7OXXqFPbs2QOLxYLPf/7zSWlQQqGQFuQoigKn04na2lpUVFQgFovB7/fDZDLh9ttvx6c+9SlcffXVWsoVvV6PP/3pT3j88cfR19eHrVu3Tpti5ROf+MSUYCqRk+58PvrRjyYFZqqqYtWqVbj22mvn98LkoIqKCjQ1NWmvVzwex6FDh7LcquxiIEUzuu6667B69WqsWrUK1113nRYARaNR/OhHP5r2mHA4jNWrV+P+++/XypYkekYeeOABPPHEE6zwnkH79u3D+Pg43n77bXi9XtjtdphMJrjdbjQ1NeHdd9+F1WrF+Pi49oVYVFQEnU6HtrY2bf7VyZMnz5sbjGi2EhPNE44dO4af/vSnqKysxGc+8xktqPnyl7+MNWvWwGg0YvXq1bjttttQWFiIjo4OGI1GuN1uBAIBPPPMM/D5fEkrhe+66y4t6N+3bx/27t2L+++/f0pbvv3tb+Puu+9GYWGhdttVV12Fbdu2nbf9//7v/66lSBBC4PLLL0dhYSGeffbZ+b84OaajowNDQ0PYunUrgIkpGs8//3yWW5VdzCNFM9qyZQuqq6tx6NAh9PT0YMOGDfj1r3895/MYjUbcd999eOyxxzLQyotXIuXBwMAAamtrk/JuNTU14cyZM0n5tRI9Uq+//vqUc1166aU4fvw4ysvLUVhYiGg0Cq/Xi/fee29BngtdPJYtW4Z//Md/hNFoxLPPPouf/OQn2n1btmzBe++9h8rKSgSDQQwPD2PdunWIxWLYsWNHFlsNfPCDH8TAwADefvvtrLZjLtatW4f9+/czr9zszDmPlJYILZsbAMlt8W7XX3+9bGpq0v4+G/imtM3nWG7Jm9lslqWlpbKyslKWl5fP69+hpaVF6nQ6CUCuX79elpWV8d+KW8a3goIC+dRTT8nHHntMApBr166VK1eulEajUQKQa9askXV1dRKYeM8KIaSqqhc854033ihvu+22C+7z0EMPya9//etyxYoVc27zYvlcfPWrX5U2m23W+zc3N8/42s1nW7du3aJ5bea57Z1rDMMeKZpRYpI5C4kuLmazGWazWettms9nWVVVbfWTqqqIx+O8eqUZffjDH0ZxcT
Eef/zxlI7Pz8/HE088gXg8ju3bt0NVVWzbtg27d+/WspmfOXMG3d3dKCkpwQc+8AH8x3/8xwXPmRiKni61SoLBYMA999yDl156CatWrUIgENCKKOeKRMFyANi+fTva29uxd+/e8+7f3NyMgwcPzrv+ZnV1NfLz86fUQZ38HZLj5twjxUCKFpQQAnfffTeefvpp/lDPk9VqhcvlQigUwsDAAICJFBWJDMBEmXbXXXehpKQEjz766KyPsdvt2gXAsmXLUFFRgSNHjuCRRx7B0NAQ2tvbMTg4iL1792JoaEjL0N/Z2Qmz2Zy2VCg2mw2BQABGoxHxeByBQCAt582GvLw8RCKRCyYXNRgMCIfD2LRpE44ePTolaeps6XQ6qKq6lBOZzjmQ4ixSWlBSSrz00ktzDqI2b96MnTt3slfsHJFIJOkLcTa5nFRV1SaaE83Hyy+/PO3KuAvx+/1aGpVEb5Pf70dJSQksFgva2tpw7NgxDA8PIxQKob+/X8sXlc58cokyMxfqucoV574uFosFtbW1Savpli1bhmAwiGPHjs3rdYxGo0viNUsnrtqjBZdKOoOCgoIMtCT36fX6pFxeM+Xc0ul0MJvNzM1F85YoRN7d3T2n46LRKOLxOOx2O6xWK2pqarT7VFVFfX098vLysHbtWpSUlCAUCs26t2jTpk3Ytm0bmpqa5tSm+vp6XHrp3OYXZ9rq1auxfv36lI6NRCLo7e2FoihobGwEAAwODmJ0dBTDw8NJgZDT6cSyZctmfW63243a2tqU2rVUMZAiyiFmsxk6nQ42m02razeXGoOJWocXQ11Cyqz6+npUV1endGwkEsH4+DjC4TC8Xi+klHj66afx3HPPoaioCMFgEKOjo3A6nVi9evWsf+jHxsZgNptnVcD88ssv1y7QfD4fTCYT1q1bN6vHsdlscDgcs9o3VYWFhSgpKUnp2EgkgsHBQQghtAstRVG013qyaDQ6pwurSCSCQCAAk8mE5cuXp9S+pYZDe7RoqaqK8vJynDp1Ch0dHZBSory8HKqqor+//6LqVTGZTFBVFTabTRsWUVVVy2I+W7FYbKlMCKUs2rBhA0ZGRnD8+PEZ9y0pKcHQ0BAikYiWzXx4eFgrWKwoCkpKSjA+Po49e/ZAURSUlpbixIkTcLvdKC0thcfjgV6v1y4CJveCrVu3DocPH0Y4HMaRI0cQiUSgKApMJhMcDgf6+vqmbZfZbNbqTnZ1dSEej08bsJWUlKCvry8pAFFVdUqm83SqrKxENBpFR0fHvM6TqA7hcDhQUVEBRVEwNDSUNEXC6/XOaahvdHQUo6OjMJvNcx7WXarYI0WLkk6nQ1FREUpLSwFAy4HS2NiIysrKKaUhljqTyQSXywWLxaIVfY1Go0t5wictYnfccQe6u7vx1ltvzbiv2+2GXq+Hw+FAaWkpnE6ndp+iKMjPz4fb7cZf/MVfIBqN4uc//zmuvfZa3HjjjbDb7ejp6YGUEuvXr0dVVRWampqShvpXrlyZ9IN+/PhxvP/++zAajRecEvDKK69ocwodDgei0Sh2794NYGJRTKLH1+12Tykpkxgiy5TS0lIMDQ2htbV1XueRUuLUqVPIy8uD2WyG0+lMW9HyQCCAI0eOpOVcuY6BFC1KVqsVzc3NaG1thc1m0273er3Yv3+/Vg3eYDDAZrNpRXuXqtHRUW1YLpER3mq1oqCgAHq9ntnHadE6ePAg/H4/qqqqEAgE0NnZqd0Xj8cxODiIsbEx9PT0wOfzQVEU9PX1oa6uDuPj49i9ezfa2trQ29uLY8eOweFwoKGhAcBEIPaLX/xC61FxOBywWCwwm80QQsw6EKmursaaNWu0vxVF0YatDh48uOCLXN566y0cPXp0TscIIZLK65jNZu2+rq4udHd3o6ura9qJ4kII1tScBwZStCgl8hhdeeWVuPLKK7Xb33zzTfh8Pu3viooKXHXVVXOeXJprFEXRhvKOHTuGwcFBeDweDA0NobS0FG63G8DEkEO6rjhp8cv0PJ3p2Gw2BIPBOc+zO3jwIMbHx7WensSPvtFoxN/+7d/iH/7hH1BXVwedTofvfOc76O/vh8PhgMFgwJkzZ3Do0CFs2rQJTqcT+/btg8FgmFKYeMuWLVi7di1WrlyJG264Iem+Cw1Dvffee3j11Ve1v2Ox2LQ5mXQ6ndbuc4v3ZptOp9MuqCwWC1avXp10f29vL6qrq7Wam5N72UwmE4qLixe0vfM1eWg267Kd1ZyZzbnNZ3M6nbKmpkYWFxdnvS2Z3MrLy2Vtba00m81y+fLl8tJLL5VVVVXSarXK2tpaCUxkXF61apVctWrVUskwzO0Cm91ul7/85S8X9DF1Op189NFHkyodpLLp9Xpps9lkfn6+/NnPfqbdfsUVV8jm5ma5du1a+Xd/93fyiSeekNu2bZOKokgAMj8/X37605+W27Ztk1u2bJly3paWFllXV6dlQE8cpyjKtPvPdVu7dq10u90SmMgUvn379qy/D863nfsdcPPNN2sZ48vLy+eUFT0b7Z1p+9SnPiVramqkoijp/r6bc2bzrAdRDKS4zWdLBFKVlZWytLQ06+3J9FZWVibXrFkjm5ubZVVVVdbbwy07m9vtls8888yCP+5TTz0lKyoq0na+yYHUdJ/f++67T/7whz+U9957rywrK5N33323BDBjqZOWlhb5la98JS3B02Le8vPztdJOkze73S4vv/zyKbffeOON0mq1Zr3d022bN29O6Tv89ttvl9ddd10628JAihu3i2GrqKjQeqK4XXxbtgKpdF7519TUyO9///tSVVW5efPm8+53xx13yCeffFL+5V/+pSwpKdF+NM8XTN10003y9ttvl6WlpVogpaqqvPPOO7P+77YQm8FgkFVVVfIDH/iAvOSSS7Tbb7nlFmkwGLLevhzYGEhx47YUNiHEBYcrEwVcs91ObtnZ3G63/MUvfrHgj/vEE0/IsrKytJxLCCENBoMUQlyw6PbWrVvlj3/8Y/nlL39ZCiG04TqHw5G0n9VqlTqdTqqqKlVVlUIIWVZWJr/whS9I4H8Dr61bt8qKioppe3JyYbNYLLN6bS0Wi7zqqqu02xLPv6GhYdH2SqW6XXHFFXLr1q3pOt+cAylONidahKSUWv28891/9iKEaMEYjca05U+qrKzE3//93wPABVfFvfDCC/j5z3+OlStX4ktf+pK277kljvx+v7aCNbFYJR6Pa6vRYrEY3G43XnzxRfT09OR8mZPEysRz2e121NTUwO/3J6WnSOSPO3HiBPx+/4K180K2bds27xXHW7ZsgdFoxAsvvJCmVs0dAymiRYp1BXOXyWTCP//zP2fk3FVVVfjsZz+Lr371qxk5/0JRVVVbwXchwWAQO3fuxE9/+lOsXLkSX/ziFwFgyoWElBJr1qzBihUrtBV6w8PDeOqppwBMFPYdGRmB2WzO6c9WIhFxUVHRtEGtz+dDV1cXgOmrHkSjUUgpUV9fj6qqqsw2dgavvfbavAPat956C3v37p1zguZEMth0YCBFlEFms3nONbwURdGSFk5OXki5Q1GUj
Hc2h54MBoOsrKykqpIkKysrPVcEsOlt+M7mW35flQBv7aMf/Wh+9atfZWZmJisrK3nyySf7LgmADTIjBQCwylp7AADniiAFANCRIAUA0JEgBSNkeno6l19+ed9lALBOghT0ZO/evdmxY8cbxpaWlnLixInMzc31VBUAGyFIQU8WFhaytLT0hrHl5eXMz8/n9OnTPVUFwEa4/QEAwCq3PwAAOFcEKRghExMT2bp1a99lALBOghSMiKrKzMxM9u3b13cpAKyTIAUjYmZmJldeeWWmpqb6LgWAdVp3kKqqQVX9rKruGe6/o6rur6qDw+3ONcfeXlVPVNUvq+oTv4/C4Xw0MTGR6enpvssAYJ02MiP11SQH1ux/PckDrbWrkjww3E9V7UtyS5L3Jbk5ybeqanB2yoXz2+TkZC655JK8//3v77sUANZhXUGqqi5L8skk31kz/Jkkdwwf35Hks2vG72ytvdpaeyrJE0muPTvlwvnrggsuyNzcXCYmJjI7O9t3OQCsw3pnpL6Z5GtJVtaM7W6tHU2S4XbXcPzSJM+sOe7wcAx4C8eOHcvDDz+ckydPZjAYuOgcYAxsebsDqupTSZ5rrT1SVTes4z3rDGO/dcPNqvpyki+v4/3gvLdjx47s3LkzKysrOXDgQBYWFnL8+PG+ywLgbaxnRur6JJ+uqqeT3Jnkxqr6XpLfVNUlSTLcPjc8/nCStauuXpbkyJvftLX27dbahzd6B1E4H506dSoLCwuZmprKyZMnMzc3l127dr39CwHo1dsGqdba7a21y1prV2b1IvIftda+kOTuJLcOD7s1yQ+Gj+9OcktVTVfVe5JcleSnZ71yOI9s2bI6OXz8+PG01rJjx45ceOGFPVcFwNt521N7b+EbSe6qqi8lOZTk80nSWttfVXcleSzJUpKvtNaWf+dK4Ty3uLiY+fn5zM7OZnnZRwZgHFi0GEbMli1bsmfPniwuLuapp57quxyAzWTDixYLUgAAqzYcpCwRA2NoMBhkMBikqjI5Odl3OQCbliAFY+jiiy/Ou9/97szMzGTv3r19lwOwaTm1BwCwyqk9AIBzRZACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6EiQAgDoSJACAOhIkAIA6GhL3wUMPZ/k5eGW8XFR9Gyc6Nf40bPxo2fj5c39umKjb1CttbNXzu+gqh5urX247zpYPz0bL/o1fvRs/OjZeDkb/XJqDwCgI0EKAKCjUQpS3+67ADZMz8aLfo0fPRs/ejZefud+jcw1UgAA42aUZqQAAMZK70Gqqm6uql9W1RNV9fW+62FVVX23qp6rql+sGXtHVd1fVQeH251rnrt92MNfVtUn+ql686qqy6vqP6vqQFXtr6qvDsf1bERV1daq+mlV/e+wZ387HNezEVdVg6r6WVXdM9zXsxFWVU9X1aNV9fOqeng4dtZ61muQqqpBkr9P8mdJ9iX586ra12dNvO4fk9z8prGvJ3mgtXZVkgeG+xn27JYk7xu+5lvD3nLuLCX5q9baHya5LslXhn3Rs9H1apIbW2tXJ/lgkpur6rro2Tj4apIDa/b1bPT9SWvtg2tudXDWetb3jNS1SZ5orT3ZWltMcmeSz/RcE0laaz9O8sKbhj+T5I7h4zuSfHbN+J2ttVdba08leSKrveUcaa0dba39z3TED3QAAAJYSURBVPDxiax+yV8aPRtZbdXJ4e7k8F+Lno20qrosySeTfGfNsJ6Nn7PWs76D1KVJnlmzf3g4xmja3Vo7mqz+4U6yaziujyOkqq5M8sdJHoyejbThKaKfJ3kuyf2tNT0bfd9M8rUkK2vG9Gy0tST3VdUjVfXl4dhZ61nfS8TUGcb8jHD86OOIqKo/SPKvSf6ytTZfdabWrB56hjE9O8daa8tJPlhVFyb5flX90Vscrmc9q6pPJXmutfZIVd2wnpecYUzPzr3rW2tHqmpXkvur6vG3OHbDPet7RupwksvX7F+W5EhPtfD2flNVlyTJcPvccFwfR0BVTWY1RP1Ta+3fhsN6NgZaay8m+a+sXpOhZ6Pr+iSfrqqns3opyo1V9b3o2UhrrR0Zbp9L8v2snqo7az3rO0g9lOSqqnpPVU1l9QKvu3uuif/f3UluHT6+NckP1ozfUlXTVfWeJFcl+WkP9W1atTr19A9JDrTW/m7NU3o2oqpqbjgTlaq6IMlNSR6Pno2s1trtrbXLWmtXZvXv1Y9aa1+Ino2sqtpeVTOvPU7yp0l+kbPYs15P7bXWlqrqtiT/kWSQ5Luttf191sSqqvqXJDckuaiqDif5myTfSHJXVX0pyaEkn0+S1tr+qroryWNZ/fXYV4anLDh3rk/yF0keHV5zkyR/HT0bZZckuWP4i6CJJHe11u6pqv+Ono0bn7PRtTurp82T1czzz621f6+qh3KWeubO5gAAHfV9ag8AYGwJUgAAHQlSAAAdCVIAAB0JUgAAHQlSAAAdCVIAAB0JUgAAHf0fXONDtWmp82QAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "fig1 = plt.figure(figsize=(10,10))\n", + "ax = fig1.add_subplot(111)\n", + "plt.imshow(img)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/compute_intersection_single_element.ipynb b/compute_intersection_single_element.ipynb new file mode 100644 index 0000000..d158d52 --- /dev/null +++ b/compute_intersection_single_element.ipynb @@ -0,0 +1,389 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import numba\n", + "from numba import jit" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# @jit(nopython=True, parallel=True)\n", + "def compute_intersection_single_element(x1,y1, x2,y2, x3,y3, x4,y4, \n", + " delta_x_tolerance=1e-6, grad_diff_tolerance=1e-6,\n", + " verbose=False):\n", + " \n", + " \n", + " inf_slope_status = np.array([0,0], np.int32)\n", + " parallel_flag = False # for readability\n", + " # check line 1 conditions\n", + " dx_line_1 = x2 - x1\n", + " dy_line_1 = y2 - y1\n", + " if np.abs(dx_line_1) < delta_x_tolerance:\n", + " inf_slope_status[0] = 1\n", + " \n", + " # check line 2 conditions\n", + " dx_line_2 = x4 - x3\n", + " dy_line_2 = y4 - y3\n", + " if np.abs(dx_line_2) < delta_x_tolerance:\n", + " inf_slope_status[1] = 1\n", + " \n", + " # if both have inf slope, they are parallel\n", + " parallel_flag = (np.sum(inf_slope_status) == 2)\n", + " if parallel_flag:\n", + " if verbose:\n", + " print(\"compute_intersection_single_element: Lines are parallel and have inf slope\")\n", + " return (-9999,-9999) # this value is chosen as it will be outside \n", + " # of our detection range (to be filtered later)\n", + " \n", + " # since one is inf and the other is not, they are not parallel\n", + " if (np.sum(inf_slope_status) > 0):\n", + " # there is one line that has inf slope\n", + " if (inf_slope_status[0] == 1): # line one has infinite slope\n", + " x = x1 # pick either one point x1 or x2 as they are the \"equal\"\n", + " y = (x - x3)*(y4-y3) / (x4-x3) + y3\n", + " if verbose:\n", + " print(\"compute_intersection_single_element: Line1 has inf slope\")\n", + " return (x, y)\n", + " \n", + " if (inf_slope_status[1] == 1):\n", + " x = x3 # pick either one point x3 or x4 as they are the \"equal\"\n", + " y = (x - x1)*(y2-y1) / (x2-x1) + y1\n", + " if verbose:\n", + " print(\"compute_intersection_single_element: Line2 has inf slope\")\n", + " return (x, y)\n", + " \n", + " else:\n", + " \n", + " # check both line conditions\n", + " m1 = dy_line_1 / dx_line_1\n", + " m2 = dy_line_2 / dx_line_2\n", + "\n", + " # Note: if the two lines are collinear, they are parallel\n", + " if np.abs(m1 - m2) < grad_diff_tolerance:\n", + " parallel_flag = True\n", + "\n", + "\n", + " if parallel_flag:\n", + " if verbose:\n", + " print(\"compute_intersection_single_element: Lines are parallel\")\n", + " return (-9999,-9999) # this value is chosen as it will be outside \n", + " # of our detection range (to be filtered later)\n", + 
"\n", + " if verbose:\n", + " print(\"compute_intersection_single_element: No line has inf slope\")\n", + "\n", + " x = (m2 *x4 - m1* x2 - y4 + y2) / (m2 - m1)\n", + " y = m2 * (x - x4) + y4\n", + " return (x, y)\n", + " \n", + " raise ValueError # np.sum(inf_slope_status) is abnormal\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "# from PointPillar\n", + "def compute_intersection_single_element(x1,y1, x2,y2, x3,y3, x4,y4, verbose=True):\n", + " num = (x1*y2 - y1*x2) * (x3-x4) - (x1-x2) * (x3*y4 - y3*x4);\n", + " den = (x1-x2) * (y3-y4) - (y1-y2) * (x3-x4);\n", + " x = num/(den + 1e-6)\n", + " \n", + " num = (x1*y2 - y1*x2) * (y3-y4) - (y1-y2) * (x3*y4 - y3*x4);\n", + " den = (x1-x2) * (y3-y4) - (y1-y2) * (x3-x4);\n", + " y = num/(den + 1e-6)\n", + " return (x,y)\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TestCase1i\n", + "(0.0, -8000000.0)\n", + "TestCase2i\n", + "(30000000.0, -30000000.0)\n", + "TestCase3i\n", + "(-0.0, 1.0000000185185187)\n", + "TestCase3ii\n", + "(0.8333333391203704, 1.1666666747685186)\n", + "TestCase4i\n", + "(0.8333333564814821, 1.1666666990740748)\n", + "TestCase4ii\n", + "(0.8333333101851859, 1.1666666342592602)\n", + "TestCase5i\n", + "(5.000000050000001, 2.0000000200000003)\n", + "TestCase5ii\n", + "(4.99999995, 1.9999999800000003)\n", + "TestCase6i\n", + "(5.000000125000003, 2.000000050000001)\n", + "TestCase6ii\n", + "(4.999999875000004, 1.9999999500000014)\n" + ] + } + ], + "source": [ + "# test the compute_intersection_single_element:\n", + "# test cases:\n", + "# 1. two lines have inf slope, both lines are parallel\n", + "# 2. no line has inf slope, both lines are parallel\n", + "# 3. two lines intersect internally, both lines are not parallel, no line has inf slope\n", + "# 4. two lines intersect externally, both lines are not parallel, no line has inf slope\n", + "# 5. two lines intersect internally, both lines are not parallel, 1 line has inf slope\n", + "# 6. two lines intersect externally, both lines are not parallel, 1 line has inf slope\n", + "# 7. two lines intersect internally, both lines are not parallel, one line has 0 slope\n", + "# 8. two lines intersect externally, both lines are not parallel, one line has 0 slope\n", + "# 9. two lines intersect internally, both lines are not parallel, 1 line has inf slope, 1 line has 0 slope\n", + "# 10. 
two lines intersect externally, both lines are not parallel, 1 line has inf slope, 1 line has 0 slope\n", + "\n", + "# case 1 (i)\n", + "class TestCase1i():\n", + " line1 = (-2, 1, -2, 3)\n", + " line2 = (-6, 2, -6, 3)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test1i = TestCase1i()\n", + "\n", + "class TestCase2i():\n", + " line1 = (0, 2, 2, 0)\n", + " line2 = (-3, 0, 0, -3)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test2i = TestCase2i()\n", + "\n", + "\n", + "class TestCase3i():\n", + " line1 = (-2, -1, 1, 2)\n", + " line2 = (-4, 5, 5, -4)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test3i = TestCase3i()\n", + "\n", + "\n", + "\n", + "class TestCase3ii():\n", + " line1 = (-10, -1, 5, 2)\n", + " line2 = (-4, 6, 4, -2)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test3ii = TestCase3ii()\n", + "\n", + "\n", + "\n", + "class TestCase4i():\n", + " line1 = (-10, -1, 5, 2)\n", + " line2 = (-4, 6, -2, 4)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "# intersection = compute_intersection_single_element(*self.line2, *self.line1, verbose=True)\n", + "# print(intersection)\n", + "\n", + "test4i = TestCase4i()\n", + "\n", + "class TestCase4ii():\n", + " line1 = (-10, -1, 5, 2)\n", + " line2 = (-2, 4, -4, 6) # reverse the order of points\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "# intersection = compute_intersection_single_element(*self.line2, *self.line1, verbose=True)\n", + "# print(intersection)\n", + "\n", + "test4ii = TestCase4ii()\n", + "\n", + "class TestCase5i():\n", + " line1 = (-10, -1, 15, 4)\n", + " line2 = (5, 4, 5, 0) # inf slope\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test5i = TestCase5i()\n", + "\n", + "class TestCase5ii():\n", + " line1 = (5, 4, 5, 0) # inf slope\n", + " line2 = (-10, -1, 15, 4)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test5ii = TestCase5ii()\n", + "\n", + "class TestCase6i():\n", + " line1 = (-10, -1, 0, 1)\n", + " line2 = (5, 4, 5, 0) # inf slope\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test6i = TestCase6i()\n", + "\n", + "class TestCase6ii():\n", + " line1 = (5, 4, 5, 
0) # inf slope\n", + " line2 = (-10, -1, 0, 1)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test6ii = TestCase6ii()" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TestCase7i\n", + "(1.4999999750000004, 3.9999999333333345)\n", + "TestCase7ii\n", + "(1.5000000250000003, 4.000000066666668)\n", + "TestCase8i\n", + "(1.4999999250000036, 3.9999998000000097)\n", + "TestCase9i\n", + "(3.0000000375000004, 4.000000050000001)\n", + "TestCase10i\n", + "(3.0000001500000075, 4.00000020000001)\n" + ] + } + ], + "source": [ + "# Test cases\n", + "# 7. two lines intersect internally, both lines are not parallel, one line has 0 slope\n", + "# 8. two lines intersect externally, both lines are not parallel, one line has 0 slope\n", + "# 9. two lines intersect internally, both lines are not parallel, 1 line has inf slope, 1 line has 0 slope\n", + "# 10. two lines intersect externally, both lines are not parallel, 1 line has inf slope, 1 line has 0 slope\n", + "\n", + "\n", + "class TestCase7i():\n", + " line1 = (-2, -3, 4, 9)\n", + " line2 = (5, 4, 0, 4) # 0 slope\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test7i = TestCase7i()\n", + "\n", + "class TestCase7ii():\n", + " line1 = (5, 4, 0, 4) # 0 slope\n", + " line2 = (-2, -3, 4, 9)\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test7ii = TestCase7ii()\n", + "\n", + "class TestCase8i():\n", + " line1 = (-2, -3, 0, 1)\n", + " line2 = (5, 4, 0, 4) # 0 slope\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test8i = TestCase8i()\n", + "\n", + "\n", + "class TestCase9i():\n", + " line1 = (-2, 4, 6, 4)\n", + " line2 = (3, 6, 3, -4) # 0 slope\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test9i = TestCase9i()\n", + "\n", + "class TestCase10i():\n", + " line1 = (-2, 4, 0, 4)\n", + " line2 = (3, 6, 3, -4) # 0 slope\n", + " \n", + " def __init__(self):\n", + " print(type(self).__name__)\n", + " intersection = compute_intersection_single_element(*self.line1, *self.line2, verbose=True)\n", + " print(intersection)\n", + "\n", + "test10i = TestCase10i()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/config.py b/config.py index ec65ffd..7337600 100644 --- a/config.py +++ b/config.py @@ -10,8 +10,10 @@ class GridParameters: y_max = 40.32 y_step = 0.16 - 
z_min = -1.0 - z_max = 3.0 + # z_min = -1.0 + # z_max = 3.0 + z_min = -3.0 + z_max = 1.0 # derived parameters Xn_f = float(x_max - x_min) / x_step @@ -19,13 +21,28 @@ class GridParameters: Xn = int(Xn_f) Yn = int(Yn_f) - def __init__(self): - super(GridParameters, self).__init__() + def __init__(self, **kwargs): + super(GridParameters, self).__init__(**kwargs) class DataParameters: - classes = {"Car": 0, + # classes_map = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + map_classes = { + 0: "Car", + 1: "Pedestrian" + } + + classes_map = {"Car": 0, "Pedestrian": 1, "Person_sitting": 1, "Cyclist": 2, @@ -35,11 +52,24 @@ class DataParameters: "Misc": 3, } - nb_classes = len(np.unique(list(classes.values()))) - assert nb_classes == np.max(np.unique(list(classes.values()))) + 1, 'Starting class indexing at zero.' + nb_classes = len(np.unique(list(classes_map.values()))) + assert nb_classes == np.max(np.unique(list(classes_map.values()))) + 1, 'Starting class indexing at zero.' - def __init__(self): - super(DataParameters, self).__init__() + # classes = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + # nb_classes = len(np.unique(list(classes.values()))) + # assert nb_classes == np.max(np.unique(list(classes.values()))) + 1, 'Starting class indexing at zero.' + + def __init__(self, **kwargs): + super(DataParameters, self).__init__(**kwargs) class NetworkParameters: @@ -50,7 +80,7 @@ class NetworkParameters: nb_channels = 64 downscaling_factor = 2 - # length, width, height, z-center, orientation + # length (x), width (y), height (z), z-center, orientation anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], [3.9, 1.6, 1.56, -1, 1.5708], [0.8, 0.6, 1.73, -0.6, 0], @@ -77,11 +107,11 @@ class NetworkParameters: heading_weight = 0.2 # 0.2 class_weight = 0.5 # 0.2 - def __init__(self): - super(NetworkParameters, self).__init__() + def __init__(self, **kwargs): + super(NetworkParameters, self).__init__(**kwargs) class Parameters(GridParameters, DataParameters, NetworkParameters): - def __init__(self): - super(Parameters, self).__init__() + def __init__(self, **kwargs): + super(Parameters, self).__init__(**kwargs) diff --git a/config_v2.py b/config_v2.py new file mode 100644 index 0000000..b477acf --- /dev/null +++ b/config_v2.py @@ -0,0 +1,118 @@ +import numpy as np + + +class GridParameters: + x_min = 0.0 + x_max = 80.64 + x_step = 0.16 + + y_min = -40.32 + y_max = 40.32 + y_step = 0.16 + + # z_min = -1.0 + # z_max = 3.0 + z_min = -3.0 + z_max = 1.0 + + # derived parameters + Xn_f = float(x_max - x_min) / x_step + Yn_f = float(y_max - y_min) / y_step + Xn = int(Xn_f) + Yn = int(Yn_f) + + def __init__(self, **kwargs): + super(GridParameters, self).__init__(**kwargs) + + +class DataParameters: + + # classes_map = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + map_classes = { + 0: "Car", + 1: "Pedestrian" + } + + classes_map = {"Car": 0, + "Pedestrian": 1, + "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + } + + nb_classes = len(np.unique(list(classes_map.values()))) + assert nb_classes == np.max(np.unique(list(classes_map.values()))) + 1, 'Starting class indexing at zero.' 
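(Editor's note, not part of the patch.) The `**kwargs` forwarding added to every `__init__` in this file, and in `config.py` above, is what lets these parameter classes sit in a cooperative multiple-inheritance chain with the data-generator bases introduced later in the patch. A minimal sketch of the pattern, with illustrative class names that are not part of the patch:

    class GridMixin:
        def __init__(self, **kwargs):
            super().__init__(**kwargs)      # pass anything we don't consume down the MRO

    class DatasetBase:
        def __init__(self, root_dir=None, **kwargs):
            super().__init__(**kwargs)
            self.root_dir = root_dir

    class Generator(GridMixin, DatasetBase):
        def __init__(self, batch_size, **kwargs):
            super().__init__(**kwargs)      # root_dir travels through GridMixin untouched
            self.batch_size = batch_size

    gen = Generator(batch_size=4, root_dir="/path/to/kitti")

Without the pass-through, `GridMixin.__init__` would raise a TypeError as soon as a subclass constructor receives keyword arguments meant for a class further down the method resolution order.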
+ + # classes = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + # nb_classes = len(np.unique(list(classes.values()))) + # assert nb_classes == np.max(np.unique(list(classes.values()))) + 1, 'Starting class indexing at zero.' + + def __init__(self, **kwargs): + super(DataParameters, self).__init__(**kwargs) + + +class NetworkParameters: + + max_points_per_pillar = 100 + max_pillars = 12000 + nb_features = 9 + nb_channels = 64 + downscaling_factor = 2 + + # length (x), width (y), height (z), z-center, orientation + anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], + [3.9, 1.6, 1.56, -1, np.pi/2], + [0.8, 0.6, 1.73, -0.6, 0], + [0.8, 0.6, 1.73, -0.6, np.pi/2], + ], dtype=np.float32).tolist() + nb_dims = 3 + + positive_iou_threshold = 0.6 + negative_iou_threshold = 0.3 + # batch_size = 1 + batch_size = 4 + total_training_epochs = 160 + iters_to_decay = 101040. # 15 * 4 * ceil(6733. / 4) --> every 15 epochs on 6733 kitti samples, cf. pillar paper + learning_rate = 2e-4 + decay_rate = 1e-8 + L1 = 0 + L2 = 0 + alpha = 0.25 + gamma = 2.0 + # original pillars paper values + focal_weight = 3.0 # 1.0 + loc_weight = 2.0 # 2.0 + size_weight = 2.0 # 2.0 + angle_weight = 1.0 # 2.0 + heading_weight = 0.2 # 0.2 + class_weight = 0.5 # 0.2 + + def __init__(self, **kwargs): + super(NetworkParameters, self).__init__(**kwargs) + + +class Parameters(GridParameters, DataParameters, NetworkParameters): + + def __init__(self, **kwargs): + super(Parameters, self).__init__(**kwargs) diff --git a/inference_utils.py b/inference_utils.py index 20ba7ac..e80b99d 100644 --- a/inference_utils.py +++ b/inference_utils.py @@ -44,6 +44,7 @@ def rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5): for boxes, confs in zip(set_boxes, confidences): assert len(boxes) == len(confs) indices = cv.dnn.NMSBoxesRotated(boxes, confs, occ_threshold, nms_iou_thr) + print(indices) indices = indices.reshape(len(indices)).tolist() nms_boxes.append([boxes[i] for i in indices]) return nms_boxes @@ -85,9 +86,79 @@ def generate_bboxes_from_pred(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_thr predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, bb_yaw, bb_heading, bb_cls, bb_conf)) + return predicted_boxes + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +def inverse_yaw_element(bb_yaw): + + + bb_yaw -= np.pi / 2 + while bb_yaw > np.pi: + print("larger than pi") + bb_yaw -= (np.pi * 2) + while bb_yaw < -np.pi: + print("smaller than -pi") + bb_yaw += (np.pi * 2) + + return bb_yaw + + # if bb_yaw > np.pi /2: + # bb_yaw -= 2 * np.pi + + # bb_yaw += np.pi/2 + # return bb_yaw + +def generate_bboxes_from_pred_and_np_array(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_threshold=0.5): + """ Generating the bounding boxes based on the regression targets """ + + # Get only the boxes where occupancy is greater or equal threshold. + real_boxes = np.where(occ >= occ_threshold) + # Get the indices of the occupancy array + coordinates = list(zip(real_boxes[0], real_boxes[1], real_boxes[2])) + # Assign anchor dimensions as original bounding box coordinates which will eventually be changed + # according to the predicted regression targets + anchor_dims = anchor_dims + real_anchors = np.random.rand(len(coordinates), len(anchor_dims[0])) + + for i, value in enumerate(real_boxes[2]): + real_anchors[i, ...] 
= anchor_dims[value] + + # Change the anchor boxes based on regression targets, this is the inverse of the operations given in + # createPillarTargets function (src/PointPillars.cpp) + predicted_boxes = [] + predicted_boxes_list = [] + for i, value in enumerate(coordinates): + # print("coordinate ", i) + real_diag = np.sqrt(np.square(real_anchors[i][0]) + np.square(real_anchors[i][1])) + real_x = value[0] * Parameters.x_step * Parameters.downscaling_factor + Parameters.x_min + real_y = value[1] * Parameters.y_step * Parameters.downscaling_factor + Parameters.y_min + bb_x = pos[value][0] * real_diag + real_x + bb_y = pos[value][1] * real_diag + real_y + bb_z = pos[value][2] * real_anchors[i][2] + real_anchors[i][3] + # print(position[value], real_x, real_y, real_diag) + bb_length = np.exp(siz[value][0]) * real_anchors[i][0] + bb_width = np.exp(siz[value][1]) * real_anchors[i][1] + bb_height = np.exp(siz[value][2]) * real_anchors[i][2] + bb_yaw = -np.arcsin(np.clip(ang[value], -1, 1)) + real_anchors[i][4] + # bb_yaw = np.arcsin(np.clip(ang[value], -1, 1)) + real_anchors[i][4] + # bb_yaw = inverse_yaw_element(bb_yaw) + bb_heading = np.round(hdg[value]) + # print(bb_heading, bb_yaw) + bb_cls = np.argmax(clf[value]) + bb_conf = occ[value] + predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf)) + predicted_boxes_list.append([bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf]) + + return predicted_boxes, np.array(predicted_boxes_list) + + class GroundTruthGenerator(DataProcessor): """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ diff --git a/inference_utils_v2.py b/inference_utils_v2.py new file mode 100644 index 0000000..685b1ec --- /dev/null +++ b/inference_utils_v2.py @@ -0,0 +1,209 @@ +import numpy as np +import cv2 as cv +from typing import List +from config_v2 import Parameters +from readers import DataReader +from point_pillars_custom_processors_v2 import DataProcessor + + +class BBox(tuple): + """ bounding box tuple that can easily be accessed while being compatible to cv2 rotational rects """ + + def __new__(cls, bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, bb_yaw, bb_heading, bb_cls, bb_conf): + bbx_tuple = ((float(bb_x), float(bb_y)), (float(bb_length), float(bb_width)), float(np.rad2deg(bb_yaw))) + return super(BBox, cls).__new__(cls, tuple(bbx_tuple)) + + def __init__(self, bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, bb_yaw, bb_heading, bb_cls, bb_conf): + self.x = bb_x + self.y = bb_y + self.z = bb_z + self.length = bb_length + self.width = bb_width + self.height = bb_height + self.yaw = bb_yaw + self.heading = bb_heading + self.cls = bb_cls + self.conf = bb_conf + + def __str__(self): + return "BB | Cls: %s, x: %f, y: %f, l: %f, w: %f, yaw: %f" % ( + self.cls, self.x, self.y, self.length, self.width, self.yaw) + + +def rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5): + """ rotational NMS + set_boxes = size NSeqs list of size NDet lists of tuples. each tuple has the form ((pos, pos), (size, size), angle) + confidences = size NSeqs list of lists containing NDet floats, i.e. 
one per detection + """ + assert len(set_boxes) == len(confidences) and 0 < occ_threshold < 1 and 0 < nms_iou_thr < 1 + if not len(set_boxes): + return [] + assert (isinstance(set_boxes[0][0][0][0], float) or isinstance(set_boxes[0][0][0][0], int)) and \ + (isinstance(confidences[0][0], float) or isinstance(confidences[0][0], int)) + nms_boxes = [] + for boxes, confs in zip(set_boxes, confidences): + assert len(boxes) == len(confs) + indices = cv.dnn.NMSBoxesRotated(boxes, confs, occ_threshold, nms_iou_thr) + print(indices) + indices = indices.reshape(len(indices)).tolist() + nms_boxes.append([boxes[i] for i in indices]) + return nms_boxes + + +def generate_bboxes_from_pred(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_threshold=0.5): + """ Generating the bounding boxes based on the regression targets """ + + # Get only the boxes where occupancy is greater or equal threshold. + real_boxes = np.where(occ >= occ_threshold) + # Get the indices of the occupancy array + coordinates = list(zip(real_boxes[0], real_boxes[1], real_boxes[2])) + # Assign anchor dimensions as original bounding box coordinates which will eventually be changed + # according to the predicted regression targets + anchor_dims = anchor_dims + real_anchors = np.random.rand(len(coordinates), len(anchor_dims[0])) + + for i, value in enumerate(real_boxes[2]): + real_anchors[i, ...] = anchor_dims[value] + + # Change the anchor boxes based on regression targets, this is the inverse of the operations given in + # createPillarTargets function (src/PointPillars.cpp) + predicted_boxes = [] + for i, value in enumerate(coordinates): + real_diag = np.sqrt(np.square(real_anchors[i][0]) + np.square(real_anchors[i][1])) + real_x = value[0] * Parameters.x_step * Parameters.downscaling_factor + Parameters.x_min + real_y = value[1] * Parameters.y_step * Parameters.downscaling_factor + Parameters.y_min + bb_x = pos[value][0] * real_diag + real_x + bb_y = pos[value][1] * real_diag + real_y + bb_z = pos[value][2] * real_anchors[i][2] + real_anchors[i][3] + # print(position[value], real_x, real_y, real_diag) + bb_length = np.exp(siz[value][0]) * real_anchors[i][0] + bb_width = np.exp(siz[value][1]) * real_anchors[i][1] + bb_height = np.exp(siz[value][2]) * real_anchors[i][2] + bb_yaw = ang[value] + real_anchors[i][4] + # bb_yaw = -np.arcsin(np.clip(ang[value], -1, 1)) + real_anchors[i][4] + bb_heading = np.round(hdg[value]) + bb_cls = np.argmax(clf[value]) + bb_conf = occ[value] + predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf)) + + + return predicted_boxes + + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +def inverse_yaw_element(bb_yaw): + + + bb_yaw -= np.pi / 2 + while bb_yaw > np.pi: + print("larger than pi") + bb_yaw -= (np.pi * 2) + while bb_yaw < -np.pi: + print("smaller than -pi") + bb_yaw += (np.pi * 2) + + return bb_yaw + + # if bb_yaw > np.pi /2: + # bb_yaw -= 2 * np.pi + + # bb_yaw += np.pi/2 + # return bb_yaw + +def generate_bboxes_from_pred_and_np_array(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_threshold=0.5): + """ Generating the bounding boxes based on the regression targets """ + + # Get only the boxes where occupancy is greater or equal threshold. 
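    # (Editor's illustration, not part of the patch: `occ` is the (X, Y, n_anchors)
    #  occupancy map, so the `np.where` call below returns one index array per axis,
    #  and zipping those arrays yields one (x_idx, y_idx, anchor_idx) tuple per anchor
    #  above the threshold. A tiny self-check, assuming numpy is imported as np as at
    #  the top of this file:
    #
    #      occ_demo = np.zeros((2, 3, 4), dtype=np.float32)
    #      occ_demo[1, 0, 2] = 0.9
    #      assert list(zip(*np.where(occ_demo >= 0.5))) == [(1, 0, 2)]
    # )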
+ real_boxes = np.where(occ >= occ_threshold) + # print(occ.shape) + # Get the indices of the occupancy array + coordinates = list(zip(real_boxes[0], real_boxes[1], real_boxes[2])) + # Assign anchor dimensions as original bounding box coordinates which will eventually be changed + # according to the predicted regression targets + anchor_dims = anchor_dims + real_anchors = np.random.rand(len(coordinates), len(anchor_dims[0])) + + for i, value in enumerate(real_boxes[2]): + real_anchors[i, ...] = anchor_dims[value] + + # Change the anchor boxes based on regression targets, this is the inverse of the operations given in + # createPillarTargets function (src/PointPillars.cpp) + predicted_boxes = [] + predicted_boxes_list = [] + for i, value in enumerate(coordinates): + # print("coordinate ", i) + real_diag = np.sqrt(np.square(real_anchors[i][0]) + np.square(real_anchors[i][1])) + real_x = value[0] * Parameters.x_step * Parameters.downscaling_factor + Parameters.x_min + real_y = value[1] * Parameters.y_step * Parameters.downscaling_factor + Parameters.y_min + # print("i: ", i, "\tx: ", real_x, "\ty:", real_y) + # print("i: ", i, "\tx: ", value[0], "\ty:", value[1]) + bb_x = pos[value][0] * real_diag + real_x + bb_y = pos[value][1] * real_diag + real_y + bb_z = pos[value][2] * real_anchors[i][2] + real_anchors[i][3] + # print(position[value], real_x, real_y, real_diag) + bb_length = np.exp(siz[value][0]) * real_anchors[i][0] + bb_width = np.exp(siz[value][1]) * real_anchors[i][1] + bb_height = np.exp(siz[value][2]) * real_anchors[i][2] + bb_heading = np.round(hdg[value]) + bb_yaw = ang[value] + real_anchors[i][4] + # if np.int32(bb_heading) == 0: + # bb_yaw -= np.pi + + bb_cls = np.argmax(clf[value]) + bb_conf = occ[value] + predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf)) + predicted_boxes_list.append([bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf]) + + return predicted_boxes, np.array(predicted_boxes_list) + + +class GroundTruthGenerator(DataProcessor): + """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ + + def __init__(self, data_reader: DataReader, label_files: List[str], calibration_files: List[str] = None, + network_format: bool = False): + super(GroundTruthGenerator, self).__init__() + self.data_reader = data_reader + self.label_files = label_files + self.calibration_files = calibration_files + self.network_format = network_format + + def __len__(self): + return len(self.label_files) + + def __getitem__(self, file_id: int): + label = self.data_reader.read_label(self.label_files[file_id]) + R, t = self.data_reader.read_calibration(self.calibration_files[file_id]) + label_transformed = self.transform_labels_into_lidar_coordinates(label, R, t) + if self.network_format: + occupancy, position, size, angle, heading, classification = self.make_ground_truth(label_transformed) + occupancy = np.array(occupancy) + position = np.array(position) + size = np.array(size) + angle = np.array(angle) + heading = np.array(heading) + classification = np.array(classification) + return [occupancy, position, size, angle, heading, classification] + return label_transformed + + +def focal_loss_checker(y_true, y_pred, n_occs=-1): + y_true = np.stack(np.where(y_true == 1)) + if n_occs == -1: + n_occs = y_true.shape[1] + occ_thr = np.sort(y_pred.flatten())[-n_occs] + y_pred = np.stack(np.where(y_pred >= occ_thr)) + p = 0 + for gt in 
range(y_true.shape[1]): + for pr in range(y_pred.shape[1]): + if np.all(y_true[:, gt] == y_pred[:, pr]): + p += 1 + break + print("#matched gt: ", p, " #unmatched gt: ", y_true.shape[1] - p, " #unmatched pred: ", y_pred.shape[1] - p, + " occupancy threshold: ", occ_thr) diff --git a/loss.py b/loss.py index 0cb355a..bd9aa21 100644 --- a/loss.py +++ b/loss.py @@ -61,7 +61,18 @@ def size_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): masked_loss = tf.boolean_mask(loss, mask) return self.size_weight * tf.reduce_mean(masked_loss) + + + def add_sin_difference(self, y_true, y_pred, factor=1.0): + if factor != 1.0: + y_true = factor * y_true + y_pred = factor * y_pred + rad_pred_encoding = tf.math.sin(y_pred) * tf.math.cos(y_true) + rad_tg_encoding = tf.math.cos(y_pred) * tf.math.sin(y_true) + return rad_tg_encoding, rad_pred_encoding + def angle_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + y_true, y_pred = self.add_sin_difference(y_true, y_pred, self.angle_weight) loss = tf.compat.v1.losses.huber_loss(y_true, y_pred, reduction="none") diff --git a/network.py b/network.py index 4108aec..3d697eb 100644 --- a/network.py +++ b/network.py @@ -1,6 +1,7 @@ import tensorflow as tf import numpy as np -from config import Parameters +# from config import Parameters +from config_v2 import Parameters def build_point_pillar_graph(params: Parameters): diff --git a/point_pillars_custom_prediction.py b/point_pillars_custom_prediction.py new file mode 100644 index 0000000..96cc9e6 --- /dev/null +++ b/point_pillars_custom_prediction.py @@ -0,0 +1,157 @@ +import os +from glob import glob +import numpy as np +import tensorflow as tf +# from processors import SimpleDataGenerator +# from custom_processors import CustomDataGenerator, AnalyseCustomDataGenerator +from point_pillars_custom_processors_v2 import CustomDataGenerator, AnalyseCustomDataGenerator +from inference_utils import generate_bboxes_from_pred, GroundTruthGenerator, focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array +from readers import KittiDataReader +from config import Parameters +from network import build_point_pillar_graph + + +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" +# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" +MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_Input_Coordinate_Analysis_v2" + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "2" + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +if __name__ == "__main__": + + params = Parameters() + # pillar_net = build_point_pillar_graph(params) + # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + # pillar_net.summary() + + # data_reader = KittiDataReader() + + # lidar_files = sorted(glob(os.path.join(DATA_ROOT, "velodyne", "*.bin"))) + # label_files = sorted(glob(os.path.join(DATA_ROOT, "label_2", "*.txt"))) + # calibration_files = sorted(glob(os.path.join(DATA_ROOT, "calib", "*.txt"))) + # assert len(lidar_files) == len(label_files) == len(calibration_files), "Input dirs require equal number of files." 
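(Editor's aside on the `add_sin_difference` change to `loss.py` above, not part of the patch.) Feeding `sin(pred) * cos(true)` and `cos(pred) * sin(true)` into the Huber loss means the angle term effectively penalises `sin(pred - true)`, so a prediction that is off by exactly pi contributes no angle loss, and the separate heading branch is what resolves the 180-degree ambiguity. A quick NumPy check of that property (illustrative only):

    import numpy as np

    def sin_diff(y_true, y_pred):
        # same encoding as PointPillarNetworkLoss.add_sin_difference with factor=1.0
        return np.cos(y_pred) * np.sin(y_true), np.sin(y_pred) * np.cos(y_true)

    t = 0.3
    p = t + np.pi                      # prediction flipped by pi
    tg, pr = sin_diff(t, p)
    print(abs(pr - tg))                # ~1e-16: zero angle residual for a pi flip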
+ # eval_gen = SimpleDataGenerator(data_reader, params.batch_size, lidar_files, label_files, calibration_files) + + + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_input_coordinate_analysis_point_pillar_v2" + # Initialize and setup output directory. + Converter = PointvizConverter(save_viz_path) + + + + + + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + + # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, + # npoints=20000, split='train', classes=list(params.classes_map.keys()), + # random_select=True, gt_database_dir=None, aug_hard_ratio=0.7) + + validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + npoints=20000, split='val',random_select=False, classes=list(params.classes_map.keys())) + + for batch_idx in range(0,20): + [pillars, voxels], [occupancy_, position_, size_, angle_, heading_, classification_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] + + # occupancy, position, size, angle, heading, classification = pillar_net.predict([pillars, voxels]) + + # angle = limit_period(angle, offset=0.5, period=2*np.pi) + + + # occupancy[:,:,:,:2] = 0 + + # print(occupancy.shape) + # exit() + + set_boxes, confidences = [], [] + loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 + for i in range(loop_range): + # set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + # heading[i], + # classification[i], params.anchor_dims, occ_threshold=0.15) + + gt_boxes3d_ = gt_boxes3d[i] + + print(gt_boxes3d_.shape) + gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], + gt_boxes3d_[:,1], gt_boxes3d_[:,2] , + gt_boxes3d_[:,0], + gt_boxes3d_[:,6]], axis=1) + + + gt_bbox_params_list = gt_bbox_params.tolist() + for k in range(len(gt_bbox_params_list)): + gt_bbox_params_list[k].append("Green") + gt_bbox_params_list[k].append("1.0") + + # if len(set_box) > 0: + # predicted_boxes3d_ = predicted_boxes3d + # # bbox_params = validation_gen.convert_predictions_into_point_viz_format(predicted_boxes3d[:,[1, 2, 0, 5, 3, 4, 6 ]]) + + # # print(predicted_boxes3d_.shape) + # # print(predicted_boxes3d_) + # # print(size[i]) + + # bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], + # predicted_boxes3d_[:,1], (predicted_boxes3d_[:,2] - predicted_boxes3d_[:,5] / 2) , + # predicted_boxes3d_[:,0], + # predicted_boxes3d_[:,6]], axis=1) + + + # # bbox_params = np.stack([predicted_boxes3d[:,4], predicted_boxes3d[:,5], predicted_boxes3d[:,3], + # # predicted_boxes3d[:,1], -(predicted_boxes3d[:,2] - predicted_boxes3d[:,5] / 2), + # # predicted_boxes3d[:,0], + # # predicted_boxes3d[:,6]], axis=1) + + # bbox_params_list = bbox_params.tolist() + # # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + # for k in range(predicted_boxes3d.shape[0]): + # bbox_params_list[k].append("Magenta") + # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + params.map_classes[int(predicted_boxes3d[k,8])]) + # gt_bbox_params_list.append(bbox_params_list[k]) + + coor = pts_input[i][:,[1,2,0]] + # coor[:,1] *= -1 + Converter.compile("train_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + bbox_params=gt_bbox_params_list) + + # set_boxes.append(set_box) + # # 
set_boxes.append(generate_bboxes_from_pred(occupancy, position, size, angle, heading, + # # classification, params.anchor_dims, occ_threshold=0.1)) + # # confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) + + # sum_bboxes = 0 + # for h in range(len(set_boxes)): + # sum_bboxes += len(set_boxes[h]) + + # print('Batch ', str(batch_idx) ,': Box predictions with occupancy > occ_thr: ', sum_bboxes) + # print('Scene 1: Box predictions with occupancy > occ_thr: ', len(set_boxes[0])) + # exit() + # print(set_boxes[-1]) + + # # NMS + # nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5) + + # print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) + + # # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes + # gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=False) + # gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) + # for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): + # print("---------- New Scenario ---------- ") + # focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) + # print("---------- ------------ ---------- ") + # for gt in gt_label: + # print(gt) + # for pred in seq_boxes: + # print(pred) diff --git a/point_pillars_custom_processors_v2.py b/point_pillars_custom_processors_v2.py new file mode 100644 index 0000000..3c825c4 --- /dev/null +++ b/point_pillars_custom_processors_v2.py @@ -0,0 +1,372 @@ +from typing import List, Any +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras.utils.data_utils import Sequence + +from config_v2 import Parameters +# from point_pillars import createPillars, createPillarsTarget +from point_pillars_v2 import createPillars, createPillarsTarget +# from readers import DataReader, Label3D +from sklearn.utils import shuffle +import sys + +from det3d.pc_kitti_dataset import PCKittiAugmentedDataset + +from point_viz.converter import PointvizConverter + + +def select_best_anchors(arr): + dims = np.indices(arr.shape[1:]) + # arr[..., 0:1] gets the occupancy value from occ in {-1, 0, 1}, i.e. 
{bad match, neg box, pos box} + ind = (np.argmax(arr[..., 0:1], axis=0),) + tuple(dims) + + return arr[ind] + + +class DataProcessor(Parameters): + + def __init__(self, **kwargs): + super(DataProcessor, self).__init__(**kwargs) + anchor_dims = np.array(self.anchor_dims, dtype=np.float32) + self.anchor_dims = anchor_dims[:, 0:3] + self.anchor_z = anchor_dims[:, 3] + self.anchor_yaw = anchor_dims[:, 4] + # Counts may be used to make statistic about how well the anchor boxes fit the objects + self.pos_cnt, self.neg_cnt = 0, 0 + + def make_point_pillars(self, points: np.ndarray): + + assert points.ndim == 2 + assert points.shape[1] == 4 + assert points.dtype == np.float32 + + pillars, indices = createPillars(points, + self.max_points_per_pillar, + self.max_pillars, + self.x_step, + self.y_step, + self.x_min, + self.x_max, + self.y_min, + self.y_max, + self.z_min, + self.z_max, + False) + + return pillars, indices + + def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): + """ Generate the ground truth label for each pillars + + Args: + gt_boxes_3d (numpy[float]): A list of floats containing [x, y, z, h, w, l, ry] + gt_cls_type_list (List[str]): A list of floats containing [cls_type] + + Returns: + [type]: [description] + """ + + # filter labels by classes (cars, pedestrians and Trams) + # Label has 4 properties (Classification (0th index of labels file), + # centroid coordinates, dimensions, yaw) + # labels = list(filter(lambda x: x.classification in self.classes, labels)) + + + + if len(gt_boxes_3d) == 0: + pX, pY = int(self.Xn / self.downscaling_factor), int(self.Yn / self.downscaling_factor) + a = int(self.anchor_dims.shape[0]) + return np.zeros((pX, pY, a), dtype='float32'), np.zeros((pX, pY, a, self.nb_dims), dtype='float32'), \ + np.zeros((pX, pY, a, self.nb_dims), dtype='float32'), np.zeros((pX, pY, a), dtype='float32'), \ + np.zeros((pX, pY, a, self.nb_classes), dtype='float64') + + # For each label file, generate these properties except for the Don't care class + + # target_positions = np.array([label.centroid for label in labels], dtype=np.float32) + # target_dimension = np.array([label.dimension for label in labels], dtype=np.float32) + # target_yaw = np.array([label.yaw for label in labels], dtype=np.float32) + # target_class = np.array([self.classes[label.classification] for label in labels], dtype=np.int32) + + target_positions = gt_boxes_3d[:,:3] + target_dimension = gt_boxes_3d[:,3:6] # don't have to translate again + target_yaw = gt_boxes_3d[:, 6] + # print(type(self.classes)) + # print(type(self.classes_map)) + # # print(gt_cls_type_list[0]) + # print(self.classes_map[gt_cls_type_list[0]]) + + target_class = np.array([self.classes_map[gt_cls_type_list[k]] for k in range(len(gt_cls_type_list))], dtype=np.int32) + + assert np.all(target_yaw >= -np.pi) & np.all(target_yaw <= np.pi) + assert len(target_positions) == len(target_dimension) == len(target_yaw) == len(target_class) + + target, pos, neg = createPillarsTarget(target_positions, + target_dimension, + target_yaw, + target_class, + self.anchor_dims, + self.anchor_z, + self.anchor_yaw, + self.positive_iou_threshold, + self.negative_iou_threshold, + self.nb_classes, + self.downscaling_factor, + self.x_step, + self.y_step, + self.x_min, + self.x_max, + self.y_min, + self.y_max, + self.z_min, + self.z_max, + False) + self.pos_cnt += pos + self.neg_cnt += neg + + # return a merged target view for all objects in the ground truth and get categorical labels + # print("target.shape: ", target.shape) + sel = 
select_best_anchors(target) + ohe = tf.keras.utils.to_categorical(sel[..., 9], num_classes=self.nb_classes, dtype='float64') + # print("self.shape: ", sel[...,0].shape) + + return sel[..., 0], sel[..., 1:4], sel[..., 4:7], sel[..., 7], sel[..., 8], ohe + + +class CustomDataGenerator(DataProcessor, Sequence, PCKittiAugmentedDataset): + """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ + + def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str ='train', + classes:List[str] =['Car', 'Pedestrian', 'Person_sitting'], random_select:bool =True, + gt_database_dir=None, aug_hard_ratio:float=0.5, **kwargs): + + super(CustomDataGenerator, self).__init__( + batch_size=batch_size, root_dir=root_dir, + npoints=npoints, split=split, classes=classes, + random_select=random_select, gt_database_dir=gt_database_dir, + aug_hard_ratio=aug_hard_ratio, **kwargs + ) + # self.data_reader = data_reader + self.batch_size = batch_size + self.sample_id_list=self.get_sample_id_list() + self.split = split + + + def get_sample(self, index): + return super().get_sample(index) + + + def __len__(self): + return len(self.sample_id_list) // self.batch_size + + def __getitem__(self, batch_id: int): + file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) + # print("inside getitem") + pillars = [] + voxels = [] + occupancy = [] + position = [] + size = [] + angle = [] + heading = [] + classification = [] + + + + for i in file_ids: + sample = self.get_sample(i) + # For each file, dividing the space into a x-y grid to create pillars + pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) + pts_input = np.concatenate((pts_lidar, sample['pts_features']), axis=1) # (N, C) + + gt_boxes3d_xyz = sample['calib'].rect_to_lidar(sample['gt_boxes3d'][:,:3]) + + gt_boxes3d = np.concatenate(( + gt_boxes3d_xyz[:,0,np.newaxis], # 0 x + gt_boxes3d_xyz[:,1,np.newaxis], # 1 y + gt_boxes3d_xyz[:,2,np.newaxis] + sample['gt_boxes3d'][:,3,np.newaxis] / 2, # 2 z + sample['gt_boxes3d'][:,5,np.newaxis], # 3 l # same as the original label + sample['gt_boxes3d'][:,4,np.newaxis], # 4 w # same as the original label + sample['gt_boxes3d'][:,3,np.newaxis], # 5 h # same as the original label + -sample['gt_boxes3d'][:,6,np.newaxis], # 6 ry + ), axis=1) + + # Voxels are the pillar ids + pillars_, voxels_ = self.make_point_pillars(pts_input) + + pillars.append(pillars_) + voxels.append(voxels_) + + + if self.split=='train' or self.split =='val': + occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( + gt_boxes3d, sample['gt_cls_type_list']) + + occupancy.append(occupancy_) + position.append(position_) + size.append(size_) + angle.append(angle_) + heading.append(heading_) + classification.append(classification_) + + pillars = np.concatenate(pillars, axis=0) + voxels = np.concatenate(voxels, axis=0) + + if self.split=='train' or self.split =='val': + occupancy = np.array(occupancy) + position = np.array(position) + size = np.array(size) + angle = np.array(angle) + heading = np.array(heading) + classification = np.array(classification) + return [pillars, voxels], [occupancy, position, size, angle, heading, classification] + else: + return [pillars, voxels] + + def on_epoch_end(self): + # print("inside epoch") + if self.split=='train' or self.split =='val': + self.sample_id_list=shuffle(self.sample_id_list) + + +class AnalyseCustomDataGenerator(CustomDataGenerator): + """ Multiprocessing-safe data generator for 
training, validation or testing, without fancy augmentation """ + + def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str ='train', + classes:List[str] =['Car', 'Pedestrian', 'Person_sitting'], random_select:bool =True, + gt_database_dir=None, aug_hard_ratio:float=0.5, **kwargs): + + super(AnalyseCustomDataGenerator, self).__init__( + batch_size=batch_size, root_dir=root_dir, + npoints=npoints, split=split, classes=classes, + random_select=random_select, gt_database_dir=gt_database_dir, + aug_hard_ratio=aug_hard_ratio, **kwargs + ) + # self.data_reader = data_reader + self.batch_size = batch_size + self.sample_id_list=self.get_sample_id_list() + self.split = split + + + def get_sample(self, index): + return super().get_sample(index) + + + + # def convert_labels_into_point_viz_format(self, gt_boxes3d): + # gt_boxes3d = gt_boxes3d[:,[3,4,5,0,1,2, 6]] # [xyz,3l4w5h,ry] => [3l,5h,4w] + # gt_boxes3d[:,5] -= (gt_boxes3d[:,2] /2) + # return gt_boxes3d + + def __len__(self): + return len(self.sample_id_list) // self.batch_size + + def __getitem__(self, batch_id: int): + file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) + # print("inside getitem") + pillars = [] + voxels = [] + occupancy = [] + position = [] + size = [] + angle = [] + heading = [] + classification = [] + + pts_input_ = [] + gt_boxes3d_ = [] + sample_ = [] + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" + # # Initialize and setup output directory. + # Converter = PointvizConverter(save_viz_path) + + for i in file_ids: + # print(i) + # print(type(i)) + sample = self.get_sample(i) + # For each file, dividing the space into a x-y grid to create pillars + pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) + # print(pts_lidar.shape) + + pts_input = np.concatenate((pts_lidar, sample['pts_features']), axis=1) # (N, C) + + gt_boxes3d_xyz = sample['calib'].rect_to_lidar(sample['gt_boxes3d'][:,:3]) + + # print(gt_boxes3d_xyz.shape) + + gt_boxes3d = np.concatenate(( + gt_boxes3d_xyz[:,0,np.newaxis], # 0 x + gt_boxes3d_xyz[:,1,np.newaxis], # 1 y + gt_boxes3d_xyz[:,2,np.newaxis] + sample['gt_boxes3d'][:,3,np.newaxis] / 2, # 2 z + sample['gt_boxes3d'][:,5,np.newaxis], # 3 l # same as the original label + sample['gt_boxes3d'][:,4,np.newaxis], # 4 w # same as the original label + sample['gt_boxes3d'][:,3,np.newaxis], # 5 h # same as the original label + -sample['gt_boxes3d'][:,6,np.newaxis], # 6 ry + ), axis=1) + + # print(type(gt_boxes3d)) + # gt_boxes3d = self.limit_yaw(gt_boxes3d) + + # bbox_params = self.convert_labels_into_point_viz_format(gt_boxes3d) + # print(bbox_params.shape) + # Converter.compile("custom_sample_{}".format(i), coors=pts_input[:,:3], intensity=pts_input[:,3], + # bbox_params=bbox_params) + + + # exit() + + # print(pts_input.shape) + # Voxels are the pillar ids + pillars_, voxels_ = self.make_point_pillars(pts_input) + + print(pillars_.shape, voxels_.shape) + # for i in range(10): + # print(pillars_[0,0,i,:]) + # print(np.sum(pillars_ > 0)) + # exit() + + pillars.append(pillars_) + voxels.append(voxels_) + + # print(sample['gt_cls_type_list']) + + if self.split=='train' or self.split =='val': + occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( + gt_boxes3d, sample['gt_cls_type_list']) + + # print(occupancy_.shape, position_.shape, size_.shape, angle_.shape, heading_.shape, classification_.shape) + + + occupancy.append(occupancy_) + position.append(position_) + size.append(size_) + 
angle.append(angle_) + heading.append(heading_) + classification.append(classification_) + + sample_.append(sample) + gt_boxes3d_.append(gt_boxes3d) + pts_input_.append(pts_input) + + # exit() + + pillars = np.concatenate(pillars, axis=0) + voxels = np.concatenate(voxels, axis=0) + + if self.split=='train' or self.split =='val': + occupancy = np.array(occupancy) + position = np.array(position) + size = np.array(size) + angle = np.array(angle) + heading = np.array(heading) + classification = np.array(classification) + return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input_, gt_boxes3d_, sample_] + else: + return [pillars, voxels] + + def on_epoch_end(self): + # print("inside epoch") + if self.split=='train' or self.split =='val': + self.sample_id_list=shuffle(self.sample_id_list) + \ No newline at end of file diff --git a/point_pillars_prediction.py b/point_pillars_prediction.py index 58c60a3..75f5406 100644 --- a/point_pillars_prediction.py +++ b/point_pillars_prediction.py @@ -2,13 +2,18 @@ from glob import glob import numpy as np import tensorflow as tf -from processors import SimpleDataGenerator +from processors import SimpleDataGenerator, AnalyseSimpleDataGenerator from inference_utils import generate_bboxes_from_pred, GroundTruthGenerator, focal_loss_checker, rotational_nms +from inference_utils import generate_bboxes_from_pred_and_np_array from readers import KittiDataReader from config import Parameters from network import build_point_pillar_graph +from inference_utils import inverse_yaw_element -DATA_ROOT = "../training" +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" +# MODEL_ROOT = "./logs_Car_Pedestrian_Original_2" MODEL_ROOT = "./logs" os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" @@ -16,6 +21,11 @@ if __name__ == "__main__": + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/prediction" + # Initialize and setup output directory. + Converter = PointvizConverter(save_viz_path) + params = Parameters() pillar_net = build_point_pillar_graph(params) pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) @@ -23,35 +33,67 @@ data_reader = KittiDataReader() - lidar_files = sorted(glob(os.path.join(DATA_ROOT, "velodyne", "*.bin"))) - label_files = sorted(glob(os.path.join(DATA_ROOT, "label_2", "*.txt"))) - calibration_files = sorted(glob(os.path.join(DATA_ROOT, "calib", "*.txt"))) + lidar_files = sorted(glob(os.path.join(DATA_ROOT, "velodyne", "*.bin")))[:100] + print(len(lidar_files)) + print() + label_files = sorted(glob(os.path.join(DATA_ROOT, "label_2", "*.txt")))[:100] + calibration_files = sorted(glob(os.path.join(DATA_ROOT, "calib", "*.txt")))[:100] assert len(lidar_files) == len(label_files) == len(calibration_files), "Input dirs require equal number of files." 
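(Editor's note, not part of the patch.) For reference, the ground-truth box relayout that both custom generators above perform inline in `__getitem__` can be read as one standalone helper. This is a sketch, assuming `sample['gt_boxes3d']` rows follow the usual KITTI rect-camera layout `[x, y, z, h, w, l, ry]` with `z` on the box bottom face, which is what the indexing in the generators implies:

    import numpy as np

    def rect_boxes_to_lidar(boxes_rect, rect_to_lidar):
        # returns (N, 7) boxes as [x, y, z_center, l, w, h, -ry] in the lidar frame
        xyz = rect_to_lidar(boxes_rect[:, :3])       # camera/rect frame -> lidar frame
        h, w, l, ry = boxes_rect[:, 3], boxes_rect[:, 4], boxes_rect[:, 5], boxes_rect[:, 6]
        z_center = xyz[:, 2] + h / 2.0               # bottom face -> geometric centre
        return np.stack([xyz[:, 0], xyz[:, 1], z_center, l, w, h, -ry], axis=1)

The yaw sign flip mirrors the `-sample['gt_boxes3d'][:, 6]` term used in both generators.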
- eval_gen = SimpleDataGenerator(data_reader, params.batch_size, lidar_files, label_files, calibration_files) - - occupancy, position, size, angle, heading, classification = pillar_net.predict(eval_gen, - batch_size=params.batch_size) - set_boxes, confidences = [], [] - loop_range = occupancy.shape[0] if len(occupancy.shape) == 4 else 1 - for i in range(loop_range): - set_boxes.append(generate_bboxes_from_pred(occupancy[i], position[i], size[i], angle[i], heading[i], - classification[i], params.anchor_dims, occ_threshold=0.7)) - confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) - print('Scene 1: Box predictions with occupancy > occ_thr: ', len(set_boxes[0])) + eval_gen = AnalyseSimpleDataGenerator(data_reader, params.batch_size, lidar_files, label_files, calibration_files) + + + for batch_idx in range(0,10): + [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input, gt_boxes3d] \ + = eval_gen[batch_idx] + + # exit() + + occupancy, position, size, angle, heading, classification = pillar_net.predict([pillars, voxels]) + set_boxes, confidences = [], [] + loop_range = occupancy.shape[0] if len(occupancy.shape) == 4 else 1 + for i in range(loop_range): + set_box, prediction = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], heading[i], + classification[i], params.anchor_dims, occ_threshold=0.3) + + if len(set_box) == 0: + continue + set_boxes.append(set_box) + # set_boxes.append(generate_bboxes_from_pred(occupancy[i], position[i], size[i], angle[i], heading[i], + # classification[i], params.anchor_dims, occ_threshold=0.3)) + confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) + + # print(set_boxes[0]) + # print(np.array(set_boxes[0]).shape) + # print(prediction.shape) + gt_boxes3d_ = [] + for j in range(len(gt_boxes3d[i])): + bbox = gt_boxes3d[i][j] + gt_boxes3d_.append([bbox.dimension[1], bbox.dimension[2], bbox.dimension[0], + bbox.centroid[1], bbox.centroid[2] + bbox.dimension[2]/2, bbox.centroid[0] + , -bbox.yaw]) + gt_boxes3d_np = np.array(gt_boxes3d_) + print(gt_boxes3d_np.shape) + + Converter.compile("eval_sample_{}".format(batch_idx*params.batch_size + i), coors=pts_input[i][:,[1,2,0]], intensity=pts_input[i][:,3], + bbox_params=gt_boxes3d_np) + # bbox_params=gt_boxes3d_np[:,[3,5,4,1,2,0,6]]) + # print('Scene 1: Box predictions with occupancy > occ_thr: ', len(set_boxes[0])) # NMS - nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5) - - print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) - - # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes - gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=False) - gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) - for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): - print("---------- New Scenario ---------- ") - focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) - print("---------- ------------ ---------- ") - for gt in gt_label: - print(gt) - for pred in seq_boxes: - print(pred) + # nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.3, nms_iou_thr=0.5) + + # print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) + + + + # # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes + # gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, 
network_format=False) + # gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) + # for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): + # print("---------- New Scenario ---------- ") + # focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) + # print("---------- ------------ ---------- ") + # for gt in gt_label: + # print(gt) + # for pred in seq_boxes: + # print(pred) diff --git a/point_pillars_training_custom_run_v2.py b/point_pillars_training_custom_run_v2.py new file mode 100644 index 0000000..196003d --- /dev/null +++ b/point_pillars_training_custom_run_v2.py @@ -0,0 +1,87 @@ +import os +import time +import numpy as np +import tensorflow as tf +from glob import glob + +# from config import Parameters +from config_v2 import Parameters +from loss import PointPillarNetworkLoss +from network import build_point_pillar_graph +# from processors import SimpleDataGenerator +# from custom_processors import CustomDataGenerator +from point_pillars_custom_processors_v2 import CustomDataGenerator +from readers import KittiDataReader + +# from point_viz.converter import PointvizConverter + +tf.get_logger().setLevel("ERROR") + +# DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" # TODO make main arg +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" # TODO make main arg +# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" +MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "2" + +if __name__ == "__main__": + + params = Parameters() + + pillar_net = build_point_pillar_graph(params) + # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + + loss = PointPillarNetworkLoss(params) + + optimizer = tf.keras.optimizers.Adam(lr=params.learning_rate, decay=params.decay_rate) + + pillar_net.compile(optimizer, loss=loss.losses()) + + # gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + gt_database_dir = None + + training_gen = CustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, + npoints=20000, split='train', classes=list(params.classes_map.keys()), + random_select=True, gt_database_dir=None, aug_hard_ratio=0.7) + + # validation_gen = CustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + # npoints=20000, split='val', classes=list(params.classes_map.keys())) + + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" + # Initialize and setup output directory. 
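(Editor's note, not part of the patch.) The `LearningRateScheduler` configured in the callbacks further down multiplies the learning rate by 0.8 once every `epoch_to_decay` epochs; written out as a named function, the lambda is equivalent to this (illustrative only):

    def step_decay(epoch, lr, every, factor=0.8):
        # drop the learning rate at epoch `every`, 2*every, 3*every, ...
        return lr * factor if (epoch % every == 0 and epoch != 0) else lr

    # tf.keras.callbacks.LearningRateScheduler(lambda e, lr: step_decay(e, lr, epoch_to_decay), verbose=True)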
+ # Converter = PointvizConverter(save_viz_path) + + + + # bbox_params = self.convert_labels_into_point_viz_format(gt_boxes3d) + # print(bbox_params.shape) + # Converter.compile("custom_sample_{}".format(i), coors=pts_input[:,:3], intensity=pts_input[:,3], + # bbox_params=bbox_params) + + + log_dir = MODEL_ROOT + epoch_to_decay = int( + np.round(params.iters_to_decay / params.batch_size * int(len(training_gen)))) + callbacks = [ + tf.keras.callbacks.TensorBoard(log_dir=log_dir), + tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(log_dir, "model.h5"), + monitor='loss', save_best_only=True), + tf.keras.callbacks.LearningRateScheduler( + lambda epoch, lr: lr * 0.8 if ((epoch % epoch_to_decay == 0) and (epoch != 0)) else lr, verbose=True), + # tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_loss'), + ] + + try: + pillar_net.fit(training_gen, + # validation_data = validation_gen, + steps_per_epoch=len(training_gen), + callbacks=callbacks, + use_multiprocessing=True, + epochs=int(params.total_training_epochs), + workers=6) + except KeyboardInterrupt: + model_str = "interrupted_%s.h5" % time.strftime("%Y%m%d-%H%M%S") + pillar_net.save(os.path.join(log_dir, model_str)) + print("Interrupt. Saving output to %s" % os.path.join(os.getcwd(), log_dir[1:], model_str)) diff --git a/point_pillars_training_run.py b/point_pillars_training_run.py index bc1c517..2dd1c27 100644 --- a/point_pillars_training_run.py +++ b/point_pillars_training_run.py @@ -8,22 +8,23 @@ from loss import PointPillarNetworkLoss from network import build_point_pillar_graph from processors import SimpleDataGenerator +# from custom_processors import CustomDataGenerator from readers import KittiDataReader tf.get_logger().setLevel("ERROR") -DATA_ROOT = "../training" # TODO make main arg -MODEL_ROOT = "./logs" +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" # TODO make main arg +MODEL_ROOT = "./logs_Car_Pedestrian_Original_2" os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = "0" +os.environ["CUDA_VISIBLE_DEVICES"] = "2" if __name__ == "__main__": params = Parameters() pillar_net = build_point_pillar_graph(params) - pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) loss = PointPillarNetworkLoss(params) @@ -43,8 +44,12 @@ validation_gen = SimpleDataGenerator(data_reader, params.batch_size, lidar_files[-validation_len:], label_files[-validation_len:], calibration_files[-validation_len:]) log_dir = MODEL_ROOT + # epoch_to_decay = int( + # np.round(params.iters_to_decay / params.batch_size * int(np.ceil(float(len(label_files)) / params.batch_size)))) + epoch_to_decay = int( - np.round(params.iters_to_decay / params.batch_size * int(np.ceil(float(len(label_files)) / params.batch_size)))) + np.round(params.iters_to_decay / params.batch_size * int(len(training_gen)))) + callbacks = [ tf.keras.callbacks.TensorBoard(log_dir=log_dir), tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(log_dir, "model.h5"), @@ -59,9 +64,9 @@ validation_data = validation_gen, steps_per_epoch=len(training_gen), callbacks=callbacks, - use_multiprocessing=True, - epochs=int(params.total_training_epochs), - workers=6) + # use_multiprocessing=True, + epochs=int(params.total_training_epochs)) + # workers=6) except KeyboardInterrupt: model_str = "interrupted_%s.h5" % time.strftime("%Y%m%d-%H%M%S") pillar_net.save(os.path.join(log_dir, model_str)) diff --git a/point_pillars_visualize_input.py 
b/point_pillars_visualize_input.py new file mode 100644 index 0000000..91ce836 --- /dev/null +++ b/point_pillars_visualize_input.py @@ -0,0 +1,145 @@ +import os +from glob import glob +import numpy as np +import tensorflow as tf +# from processors import SimpleDataGenerator +# from custom_processors import CustomDataGenerator, AnalyseCustomDataGenerator +from point_pillars_custom_processors_v2 import CustomDataGenerator, AnalyseCustomDataGenerator +from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator, focal_loss_checker +from inference_utils_v2 import rotational_nms, generate_bboxes_from_pred_and_np_array +from readers import KittiDataReader +from config_v2 import Parameters +from network import build_point_pillar_graph + + +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" +# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" +MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_Input_Coordinate_Analysis_v2" + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "2" + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +if __name__ == "__main__": + + params = Parameters() + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_gt_only" + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_labels_only" + save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_gt_and_labels" + # Initialize and setup output directory. 
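+    # The loop below overlays the raw ground-truth boxes (appended with "Green") with boxes
+    # decoded back from the generated network targets (appended with "Magenta"); if the two
+    # sets of boxes disagree, the target encoding/decoding round trip is broken.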
+ Converter = PointvizConverter(save_viz_path) + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + + validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + npoints=16384, split='val',random_select=False, classes=list(params.classes_map.keys())) + + for batch_idx in range(0,20): + [pillars, voxels], [occupancy_, position_, size_, angle_, heading_, classification_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] + + + set_boxes, confidences = [], [] + loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 + for i in range(loop_range): + # set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + # heading[i], + # classification[i], params.anchor_dims, occ_threshold=0.15) + gt_set_box, decoded_gt_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy_[i], position_[i], size_[i], angle_[i], + heading_[i], + classification_[i], params.anchor_dims, occ_threshold=0.5) + + # exit() + gt_boxes3d_ = gt_boxes3d[i] + + print(gt_boxes3d_.shape) + gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], + gt_boxes3d_[:,1], gt_boxes3d_[:,2] , + gt_boxes3d_[:,0], + gt_boxes3d_[:,6]], axis=1) + + + gt_bbox_params_list = gt_bbox_params.tolist() + # gt_bbox_params_list = [] + # print(gt_bbox_params_list) + # print(len(gt_bbox_params_list)) + # print(len(gt_bbox_params_list[0])) + + for k in range(len(gt_bbox_params_list)): + msg = "%.5f, %.5f"%(gt_bbox_params_list[k][3],gt_bbox_params_list[k][5]) + gt_bbox_params_list[k].append("Green") + gt_bbox_params_list[k].append(msg) + + if len(gt_set_box) > 0: + decoded_gt_boxes3d_ = decoded_gt_boxes3d + # bbox_params = validation_gen.convert_predictions_into_point_viz_format(predicted_boxes3d[:,[1, 2, 0, 5, 3, 4, 6 ]]) + + print(decoded_gt_boxes3d_.shape) + # print(predicted_boxes3d_) + # print(size[i]) + + bbox_params = np.stack([decoded_gt_boxes3d_[:,3], decoded_gt_boxes3d_[:,5], decoded_gt_boxes3d_[:,4], + decoded_gt_boxes3d_[:,1], decoded_gt_boxes3d_[:,2] , + decoded_gt_boxes3d_[:,0], + decoded_gt_boxes3d_[:,6]], axis=1) + + + # bbox_params = np.stack([predicted_boxes3d[:,4], predicted_boxes3d[:,5], predicted_boxes3d[:,3], + # predicted_boxes3d[:,1], -(predicted_boxes3d[:,2] - predicted_boxes3d[:,5] / 2), + # predicted_boxes3d[:,0], + # predicted_boxes3d[:,6]], axis=1) + + bbox_params_list = bbox_params.tolist() + # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + for k in range(decoded_gt_boxes3d.shape[0]): + msg = "%.5f, %.5f"%(bbox_params_list[k][3],bbox_params_list[k][5]) + # msg = (str(bbox_params_list[k][3:5])) + bbox_params_list[k].append("Magenta") + bbox_params_list[k].append(msg) + # bbox_params_list[k].append(str(decoded_gt_boxes3d[k,9]) + params.map_classes[int(decoded_gt_boxes3d[k,8])]) + gt_bbox_params_list.append(bbox_params_list[k]) + + # print(gt_bbox_params_list) + # print(gt_bbox_params.tolist()) + + coor = pts_input[i][:,[1,2,0]] + # coor[:,1] *= -1 + Converter.compile("val_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + bbox_params=gt_bbox_params_list) + + # exit() + # set_boxes.append(set_box) + # # set_boxes.append(generate_bboxes_from_pred(occupancy, position, size, angle, heading, + # # classification, params.anchor_dims, occ_threshold=0.1)) + # # confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) + + # sum_bboxes = 0 + # for h in range(len(set_boxes)): + # 
sum_bboxes += len(set_boxes[h]) + + # print('Batch ', str(batch_idx) ,': Box predictions with occupancy > occ_thr: ', sum_bboxes) + # print('Scene 1: Box predictions with occupancy > occ_thr: ', len(set_boxes[0])) + # exit() + # print(set_boxes[-1]) + + # # NMS + # nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5) + + # print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) + + # # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes + # gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=False) + # gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) + # for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): + # print("---------- New Scenario ---------- ") + # focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) + # print("---------- ------------ ---------- ") + # for gt in gt_label: + # print(gt) + # for pred in seq_boxes: + # print(pred) diff --git a/processors.py b/processors.py index 05b1412..dee118e 100644 --- a/processors.py +++ b/processors.py @@ -11,6 +11,8 @@ import sys +from point_viz.converter import PointvizConverter + def select_best_anchors(arr): dims = np.indices(arr.shape[1:]) # arr[..., 0:1] gets the occupancy value from occ in {-1, 0, 1}, i.e. {bad match, neg box, pos box} @@ -33,16 +35,30 @@ def __init__(self): def transform_labels_into_lidar_coordinates(labels: List[Label3D], R: np.ndarray, t: np.ndarray): transformed = [] for label in labels: - label.centroid = label.centroid @ np.linalg.inv(R).T - t - label.dimension = label.dimension[[2, 1, 0]] + label.centroid = (label.centroid - t) @ np.linalg.inv(R).T + label.dimension = label.dimension[[2, 1, 0]] # h w l => l ,w ,h label.yaw -= np.pi / 2 while label.yaw < -np.pi: + print("smaller than -pi") label.yaw += (np.pi * 2) while label.yaw > np.pi: + print("larger than pi") label.yaw -= (np.pi * 2) transformed.append(label) return labels + + def convert_labels_into_point_viz_format(self, labels: List[Label3D]): + label_list = [] + + for label in labels: + label_ = [label.dimension[2], label.dimension[0], label.dimension[1]] + label_.extend([label.centroid[0], label.centroid[1], label.centroid[2]]) + label_.extend([label.yaw]) + label_list.append(label_) + + return np.array(label_list) + def make_point_pillars(self, points: np.ndarray): assert points.ndim == 2 @@ -69,7 +85,7 @@ def make_ground_truth(self, labels: List[Label3D]): # filter labels by classes (cars, pedestrians and Trams) # Label has 4 properties (Classification (0th index of labels file), # centroid coordinates, dimensions, yaw) - labels = list(filter(lambda x: x.classification in self.classes, labels)) + labels = list(filter(lambda x: x.classification in self.classes_map, labels)) if len(labels) == 0: pX, pY = int(self.Xn / self.downscaling_factor), int(self.Yn / self.downscaling_factor) @@ -82,7 +98,7 @@ def make_ground_truth(self, labels: List[Label3D]): target_positions = np.array([label.centroid for label in labels], dtype=np.float32) target_dimension = np.array([label.dimension for label in labels], dtype=np.float32) target_yaw = np.array([label.yaw for label in labels], dtype=np.float32) - target_class = np.array([self.classes[label.classification] for label in labels], dtype=np.int32) + target_class = np.array([self.classes_map[label.classification] for label in labels], dtype=np.int32) assert np.all(target_yaw >= -np.pi) & np.all(target_yaw <= np.pi) assert 
len(target_positions) == len(target_dimension) == len(target_yaw) == len(target_class) @@ -151,12 +167,17 @@ def __getitem__(self, batch_id: int): heading = [] classification = [] + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/original_processor" + # # Initialize and setup output directory. + # Converter = PointvizConverter(save_viz_path) + for i in file_ids: lidar = self.data_reader.read_lidar(self.lidar_files[i]) # For each file, dividing the space into a x-y grid to create pillars # Voxels are the pillar ids pillars_, voxels_ = self.make_point_pillars(lidar) + # print(pillars_.shape, voxels_.shape) pillars.append(pillars_) voxels.append(voxels_) @@ -166,11 +187,30 @@ def __getitem__(self, batch_id: int): # Labels are transformed into the lidar coordinate bounding boxes # Label has 7 values, centroid, dimensions and yaw value. label_transformed = self.transform_labels_into_lidar_coordinates(label, R, t) + + + # # Pass data and create html files. + # pts_rect = lidar[:,:3] + # intensity = lidar[:,3] + # # sample_info['pts_rect'][:,1] *= -1 # mirror the y axis + # # pts_rect[:,1] *= -1 + # # coors = sample_info['pts_rect'] + # bbox_params = self.convert_labels_into_point_viz_format(label_transformed) + # print(bbox_params) + # Converter.compile("ori_sample_{}".format(i), coors=pts_rect, intensity=intensity, + # bbox_params=bbox_params) + + + # exit() + # These definitions can be found in point_pillars.cpp file # We are splitting a 10 dim vector that contains this information. occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( label_transformed) + # print(occupancy_.shape, position_.shape, size_.shape, angle_.shape, heading_.shape, classification_.shape) + # exit() + occupancy.append(occupancy_) position.append(position_) size.append(size_) @@ -197,3 +237,100 @@ def on_epoch_end(self): if self.label_files is not None: self.lidar_files, self.label_files, self.calibration_files = \ shuffle(self.lidar_files, self.label_files, self.calibration_files) + + + +class AnalyseSimpleDataGenerator(DataProcessor, Sequence): + """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ + + def __init__(self, data_reader: DataReader, batch_size: int, lidar_files: List[str], label_files: List[str] = None, + calibration_files: List[str] = None): + super(AnalyseSimpleDataGenerator, self).__init__() + self.data_reader = data_reader + self.batch_size = batch_size + self.lidar_files = lidar_files + self.label_files = label_files + self.calibration_files = calibration_files + + assert (calibration_files is None and label_files is None) or \ + (calibration_files is not None and label_files is not None) + + if self.calibration_files is not None: + assert len(self.calibration_files) == len(self.lidar_files) + assert len(self.label_files) == len(self.lidar_files) + + def __len__(self): + return len(self.lidar_files) // self.batch_size + + def __getitem__(self, batch_id: int): + file_ids = np.arange(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) + # print("inside getitem") + pillars = [] + voxels = [] + occupancy = [] + position = [] + size = [] + angle = [] + heading = [] + classification = [] + pts_input = [] + gt_boxes3d = [] + + save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/original_processor" + # Initialize and setup output directory. 
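+        # Unlike SimpleDataGenerator, this debug variant also dumps every lidar sample it reads
+        # to the visualization directory above and returns the raw points and transformed labels
+        # alongside the network targets, so batches can be inspected offline.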
+ Converter = PointvizConverter(save_viz_path) + + for i in file_ids: + lidar = self.data_reader.read_lidar(self.lidar_files[i]) + + + Converter.compile("transform_sample_{}".format(i), coors=lidar[:,:3], intensity=lidar[:,3]) + + # For each file, dividing the space into a x-y grid to create pillars + # Voxels are the pillar ids + pillars_, voxels_ = self.make_point_pillars(lidar) + + # print(pillars_.shape, voxels_.shape) + pillars.append(pillars_) + voxels.append(voxels_) + + if self.label_files is not None: + label = self.data_reader.read_label(self.label_files[i]) + R, t = self.data_reader.read_calibration(self.calibration_files[i]) + # Labels are transformed into the lidar coordinate bounding boxes + # Label has 7 values, centroid, dimensions and yaw value. + label_transformed = self.transform_labels_into_lidar_coordinates(label, R, t) + + # These definitions can be found in point_pillars.cpp file + # We are splitting a 10 dim vector that contains this information. + occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( + label_transformed) + + occupancy.append(occupancy_) + position.append(position_) + size.append(size_) + angle.append(angle_) + heading.append(heading_) + classification.append(classification_) + pts_input.append(lidar) + gt_boxes3d.append(label_transformed) + + pillars = np.concatenate(pillars, axis=0) + voxels = np.concatenate(voxels, axis=0) + + if self.label_files is not None: + occupancy = np.array(occupancy) + position = np.array(position) + size = np.array(size) + angle = np.array(angle) + heading = np.array(heading) + classification = np.array(classification) + return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input, gt_boxes3d] + else: + return [pillars, voxels] + + def on_epoch_end(self): + # print("inside epoch") + if self.label_files is not None: + self.lidar_files, self.label_files, self.calibration_files = \ + shuffle(self.lidar_files, self.label_files, self.calibration_files) \ No newline at end of file diff --git a/readers.py b/readers.py index b8ca8d9..c2a4880 100644 --- a/readers.py +++ b/readers.py @@ -8,7 +8,7 @@ class Label3D: def __init__(self, classification: str, centroid: np.ndarray, dimension: np.ndarray, yaw: float): self.classification = classification self.centroid = centroid - self.dimension = dimension + self.dimension = dimension # hwl self.yaw = yaw def __str__(self): diff --git a/src/point_pillars.cpp b/src/point_pillars.cpp index 4c2127d..6451f6c 100644 --- a/src/point_pillars.cpp +++ b/src/point_pillars.cpp @@ -7,6 +7,7 @@ #include #include #include +// #include namespace py = pybind11; struct IntPairHash { @@ -535,10 +536,10 @@ std::tuple, int, int> createPillarsTarget(const pybind1 negCnt++; if (printTime) { -// std::cout << "\nThere was no sufficiently overlapping anchor anywhere for object " << objectCount << std::endl; -// py::print("There was no sufficiently overlapping anchor anywhere for object " +str(objectCount)); -// std::cout << "Best IOU was " << maxIou << ". Adding the best location regardless of threshold." << std::endl; -// py::print("Best IOU was "+str(maxIou)+" Adding the best location regardless of threshold"); + // std::cout << "\nThere was no sufficiently overlapping anchor anywhere for object " << objectCount << std::endl; + // py::print("There was no sufficiently overlapping anchor anywhere for object " +std::to_string(objectCount)); + // std::cout << "Best IOU was " << maxIou << ". Adding the best location regardless of threshold." 
<< std::endl; + // py::print("Best IOU was "+std::to_string(maxIou)+" Adding the best location regardless of threshold"); } const auto xId = static_cast(std::floor((labelBox.x - xMin) / (xStep * downscalingFactor))); diff --git a/src/point_pillars_v2.cpp b/src/point_pillars_v2.cpp new file mode 100644 index 0000000..85a240e --- /dev/null +++ b/src/point_pillars_v2.cpp @@ -0,0 +1,634 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +// #include +namespace py = pybind11; + +struct IntPairHash { + std::size_t operator()(const std::pair &p) const { + assert(sizeof(std::size_t)>=8); + //Shift first integer over to make room for the second integer. The two are + //then packed side by side. + return (((uint64_t)p.first)<<32) | ((uint64_t)p.second); + } +}; + +struct PillarPoint { + float x; + float y; + float z; + float intensity; + float xc; + float yc; + float zc; + float xp; + float yp; +}; + +pybind11::tuple createPillars(pybind11::array_t points, + int maxPointsPerPillar, + int maxPillars, + float xStep, + float yStep, + float xMin, + float xMax, + float yMin, + float yMax, + float zMin, + float zMax, + bool printTime = false) +{ + std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); + + if (points.ndim() != 2 || points.shape()[1] != 4) + { + throw std::runtime_error("numpy array with shape (n, 4) expected (n being the number of points)"); + } + + std::unordered_map, std::vector, IntPairHash> map; + + for (int i = 0; i < points.shape()[0]; ++i) + { + if ((points.at(i, 0) < xMin) || (points.at(i, 0) >= xMax) || \ + (points.at(i, 1) < yMin) || (points.at(i, 1) >= yMax) || \ + (points.at(i, 2) < zMin) || (points.at(i, 2) >= zMax)) + { + continue; + } + + auto xIndex = static_cast(std::floor((points.at(i, 0) - xMin) / xStep)); + auto yIndex = static_cast(std::floor((points.at(i, 1) - yMin) / yStep)); + + PillarPoint p = { + points.at(i, 0), // x + points.at(i, 1), // y + points.at(i, 2), // z + points.at(i, 3), // intensity + 0, // xc + 0, // yc + 0, // zc + 0, // xp + 0, // yp + }; + + map[{xIndex, yIndex}].emplace_back(p); + } + + pybind11::array_t tensor; + pybind11::array_t indices; + + tensor.resize({1, maxPillars, maxPointsPerPillar, 9}); + indices.resize({1, maxPillars, 3}); + + int pillarId = 0; + for (auto& pair: map) + { + if (pillarId >= maxPillars) + { + break; + } + + float xMean = 0; + float yMean = 0; + float zMean = 0; + for (const auto& p: pair.second) + { + xMean += p.x; + yMean += p.y; + zMean += p.z; + } + xMean /= pair.second.size(); + yMean /= pair.second.size(); + zMean /= pair.second.size(); + + for (auto& p: pair.second) + { + p.xc = p.x - xMean; + p.yc = p.y - yMean; + p.zc = p.z - zMean; + } + + auto xIndex = static_cast(std::floor((xMean - xMin) / xStep)); + auto yIndex = static_cast(std::floor((yMean - yMin) / yStep)); + auto zMid = (zMax - zMin) * 0.5f; + indices.mutable_at(0, pillarId, 1) = xIndex; + indices.mutable_at(0, pillarId, 2) = yIndex; + + int pointId = 0; + for (const auto& p: pair.second) + { + if (pointId >= maxPointsPerPillar) + { + break; + } + + // tensor.mutable_at(0, pillarId, pointId, 0) = p.x - (xIndex * xStep + xMin); + // tensor.mutable_at(0, pillarId, pointId, 1) = p.y - (yIndex * yStep + yMin); + // tensor.mutable_at(0, pillarId, pointId, 2) = p.z - zMid; + // tensor.mutable_at(0, pillarId, pointId, 3) = p.intensity; + // tensor.mutable_at(0, pillarId, pointId, 4) = p.xc; + // tensor.mutable_at(0, pillarId, pointId, 5) = p.yc; + // tensor.mutable_at(0, 
pillarId, pointId, 6) = p.zc; + + tensor.mutable_at(0, pillarId, pointId, 0) = p.x; + tensor.mutable_at(0, pillarId, pointId, 1) = p.y; + tensor.mutable_at(0, pillarId, pointId, 2) = p.z; + tensor.mutable_at(0, pillarId, pointId, 3) = p.intensity; + tensor.mutable_at(0, pillarId, pointId, 4) = p.xc; + tensor.mutable_at(0, pillarId, pointId, 5) = p.yc; + tensor.mutable_at(0, pillarId, pointId, 6) = p.zc; + tensor.mutable_at(0, pillarId, pointId, 7) = p.x - (xIndex * xStep + xMin); + tensor.mutable_at(0, pillarId, pointId, 8) = p.y - (yIndex * yStep + yMin); + + pointId++; + } + + pillarId++; + } + + pybind11::tuple result = pybind11::make_tuple(tensor, indices); + + std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast( t2 - t1 ).count(); + if (printTime) + std::cout << "createPillars took: " << static_cast(duration) / 1e6 << " seconds" << std::endl; + + return result; +} + +struct BoundingBox3D +{ + float x; + float y; + float z; + float length; + float width; + float height; + float yaw; + float classId; +}; + +struct Point2D { + float x; + float y; +}; + +typedef std::vector Polyline2D; + +// Returns x-value of point of intersection of two lines +float xIntersect(float x1, float y1, float x2, float y2, + float x3, float y3, float x4, float y4) +{ + float num = (x1*y2 - y1*x2) * (x3-x4) - (x1-x2) * (x3*y4 - y3*x4); + float den = (x1-x2) * (y3-y4) - (y1-y2) * (x3-x4); + return num/den; +} + +// Returns y-value of point of intersection of two lines +float yIntersect(float x1, float y1, float x2, float y2, + float x3, float y3, float x4, float y4) +{ + float num = (x1*y2 - y1*x2) * (y3-y4) - (y1-y2) * (x3*y4 - y3*x4); + float den = (x1-x2) * (y3-y4) - (y1-y2) * (x3-x4); + return num/den; +} + +// Returns area of polygon using the shoelace method +float polygonArea(const Polyline2D &polygon) +{ + float area = 0.0; + + size_t j = polygon.size()-1; + for (size_t i = 0; i < polygon.size(); i++) + { + area += (polygon[j].x + polygon[i].x) * (polygon[j].y - polygon[i].y); + j = i; // j is previous vertex to i + } + + return std::abs(area / 2.0); // Return absolute value +} + +float rotatedX(float x, float y, float angle) +{ + return x * std::cos(angle) - y * std::sin(angle); +} + +float rotatedY(float x, float y, float angle) +{ + return x * std::sin(angle) + y * std::cos(angle); +} + +// Construct bounding box in 2D, coordinates are returned in clockwise order +Polyline2D boundingBox3DToTopDown(const BoundingBox3D &box1) +{ + Polyline2D box; + box.push_back({rotatedX(-0.5 * box1.width, 0.5 * box1.length, + box1.yaw) + box1.x, + rotatedY(-0.5 * box1.width, 0.5 * box1.length, + box1.yaw) + box1.y}); + + box.push_back({rotatedX(0.5 * box1.width, 0.5 * box1.length, + box1.yaw) + box1.x, + rotatedY(0.5 * box1.width, 0.5 * box1.length, + box1.yaw) + box1.y}); + + box.push_back({rotatedX(0.5 * box1.width, -0.5 * box1.length, + box1.yaw) + box1.x, + rotatedY(0.5 * box1.width, -0.5 * box1.length, + box1.yaw) + box1.y}); + + box.push_back({rotatedX(-0.5 * box1.width, -0.5 * box1.length, + box1.yaw) + box1.x, + rotatedY(-0.5 * box1.width, -0.5 * box1.length, + box1.yaw) + box1.y}); + + return box; +} + +// This functions clips all the edges w.r.t one Clip edge of clipping area +// Returns a clipped polygon... 
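+// Note: one call to clip() keeps only the part of the polygon on the inner (kept) side of the
+// directed edge (x1,y1)->(x2,y2). sutherlandHodgmanClip() below applies it once per edge of the
+// clipping rectangle, and polygonArea() then measures the resulting intersection polygon; together
+// these back the bird's-eye-view overlap that createPillarsTarget() evaluates for each anchor.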
+Polyline2D clip(const Polyline2D &poly_points, + float x1, + float y1, + float x2, + float y2) +{ + Polyline2D new_points; + + for (size_t i = 0; i < poly_points.size(); i++) + { + // (ix,iy),(kx,ky) are the co-ordinate values of the points + // i and k form a line in polygon + size_t k = (i+1) % poly_points.size(); + float ix = poly_points[i].x, iy = poly_points[i].y; + float kx = poly_points[k].x, ky = poly_points[k].y; + + // Calculating position of first point w.r.t. clipper line + float i_pos = (x2-x1) * (iy-y1) - (y2-y1) * (ix-x1); + + // Calculating position of second point w.r.t. clipper line + float k_pos = (x2-x1) * (ky-y1) - (y2-y1) * (kx-x1); + + // Case 1 : When both points are inside + if (i_pos < 0 && k_pos < 0) + { + //Only second point is added + new_points.push_back({kx,ky}); + } + + // Case 2: When only first point is outside + else if (i_pos >= 0 && k_pos < 0) + { + // Point of intersection with edge + // and the second point is added + new_points.push_back({xIntersect(x1, y1, x2, y2, ix, iy, kx, ky), + yIntersect(x1, y1, x2, y2, ix, iy, kx, ky)}); + new_points.push_back({kx,ky}); + + } + + // Case 3: When only second point is outside + else if (i_pos < 0 && k_pos >= 0) + { + //Only point of intersection with edge is added + new_points.push_back({xIntersect(x1, y1, x2, y2, ix, iy, kx, ky), + yIntersect(x1, y1, x2, y2, ix, iy, kx, ky)}); + + } + // Case 4: When both points are outside + else + { + //No points are added + } + } + + return new_points; +} + +// Implements Sutherland–Hodgman algorithm +// Returns a polygon with the intersection between two polygons. +Polyline2D sutherlandHodgmanClip(const Polyline2D &poly_points_vector, + const Polyline2D &clipper_points) +{ + Polyline2D clipped_poly_points_vector = poly_points_vector; + for (size_t i=0; i, int, int> createPillarsTarget(const pybind11::array_t& objectPositions, + const pybind11::array_t& objectDimensions, + const pybind11::array_t& objectYaws, + const pybind11::array_t& objectClassIds, + const pybind11::array_t& anchorDimensions, + const pybind11::array_t& anchorZHeights, + const pybind11::array_t& anchorYaws, + float positiveThreshold, + float negativeThreshold, + int nbClasses, + int downscalingFactor, + float xStep, + float yStep, + float xMin, + float xMax, + float yMin, + float yMax, + float zMin, + float zMax, + bool printTime = false) +{ + + std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); + + const auto xSize = static_cast(std::floor((xMax - xMin) / (xStep * downscalingFactor))); +// py::print("xSize", xSize); + const auto ySize = static_cast(std::floor((yMax - yMin) / (yStep * downscalingFactor))); +// py::print("ySize", ySize); + + const int nbAnchors = anchorDimensions.shape()[0]; //4 Number of anchors +// py::print("nbAnchors", nbAnchors); +// Anchor length + + if (nbAnchors <= 0) + { + throw std::runtime_error("Anchor length is zero"); + } + + const int nbObjects = objectDimensions.shape()[0]; //6 Number of labels inside a label.txt file +// BB dimensions from the label file + if (nbObjects <= 0) + { + throw std::runtime_error("Object length is zero"); + } +// py::print("nbObjects", nbObjects); + + // parse numpy arrays +// Preparing the anchor bounding box + std::vector anchorBoxes = {}; + std::vector anchorDiagonals; + for (int i = 0; i < nbAnchors; ++i) + { + BoundingBox3D anchorBox = {}; + anchorBox.x = 0; + anchorBox.y = 0; + anchorBox.length = anchorDimensions.at(i, 0); + anchorBox.width = anchorDimensions.at(i, 1); + anchorBox.height = 
anchorDimensions.at(i, 2); + anchorBox.z = anchorZHeights.at(i); + anchorBox.yaw = anchorYaws.at(i); + anchorBoxes.emplace_back(anchorBox); // Appends a new anchorBox to the AnchorBoxes container + // Note that anchor box doesn't have a classId as of now. + anchorDiagonals.emplace_back(std::sqrt(std::pow(anchorBox.width, 2) + std::pow(anchorBox.length, 2))); + } + +// Preparing the label bounding box + std::vector labelBoxes = {}; + for (int i = 0; i < nbObjects; ++i) + { + float x = objectPositions.at(i, 0); + float y = objectPositions.at(i, 1); + if (x < xMin | x > xMax | y < yMin | y > yMax) + { + continue; + } + BoundingBox3D labelBox = {}; + labelBox.x = x; + labelBox.y = y; + labelBox.z = objectPositions.at(i, 2); + labelBox.length = objectDimensions.at(i, 0); + labelBox.width = objectDimensions.at(i, 1); + labelBox.height = objectDimensions.at(i, 2); + labelBox.yaw = objectYaws.at(i); + labelBox.classId = objectClassIds.at(i); + labelBoxes.emplace_back(labelBox); + } + + pybind11::array_t tensor; + tensor.resize({nbObjects, xSize, ySize, nbAnchors, 10}); //Tensor of size (6,252,252,4,10) for first file + + pybind11::buffer_info tensor_buffer = tensor.request(); + float *ptr1 = (float *) tensor_buffer.ptr; + // Zero filling the tensor. Every element is presently zero + for (size_t idx = 0; idx < nbObjects * xSize * ySize * nbAnchors * 10; idx++) + { + ptr1[idx] = 0; + } + + int posCnt = 0; + int negCnt = 0; + int objectCount = 0; + if (printTime) + { +// std::cout << "Received " << labelBoxes.size() << " objects" << std::endl; +// py::print("Received "+str(labelBoxes.size())+" objects"); + } + for (const auto& labelBox: labelBoxes) //For every label box which is a 3d bounding box + { + // zone-in on potential spatial area of interest + // Length of (width,length) axis diagonal. + float objectDiameter = std::sqrt(std::pow(labelBox.width, 2) + std::pow(labelBox.length, 2)); + // Offset = Number of grid boxes that can fit on the object diameter + const auto x_offset = static_cast(std::ceil(objectDiameter / (xStep * downscalingFactor))); + const auto y_offset = static_cast(std::ceil(objectDiameter / (yStep * downscalingFactor))); + // Xc = Number of grid boxes that can fit between Xmin (Ymin) and Label's x (y) coordinate + const auto xC = static_cast(std::floor((labelBox.x - xMin) / (xStep * downscalingFactor))); + const auto yC = static_cast(std::floor((labelBox.y - yMin) / (yStep * downscalingFactor))); + // X(Y)Start = Start from Xc (Yc) - Number of boxes in object's diameter. + // For example the object is located at 5 unites and is 2 unites long. Then X(Y)start will begin + // the search from 3 + const auto xStart = clip(xC - x_offset, 0, xSize); + const auto yStart = clip(yC - y_offset, 0, ySize); + // Similarly end the search at 8 units. Because the object cannot extend beyond that. 
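+        // Example with the grid values from config_v2_2.py: x_step = 0.16 m and
+        // downscalingFactor = 2 give a 0.32 m output cell, so a car-sized box with a
+        // ~4.2 m top-down diagonal (sqrt(3.9^2 + 1.6^2)) is searched over roughly
+        // ceil(4.2 / 0.32) = 14 cells on either side of (xC, yC).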
+ const auto xEnd = clip(xC + x_offset, 0, xSize); + const auto yEnd = clip(yC + y_offset, 0, ySize); + + float maxIou = 0; + BoundingBox3D bestAnchor = {}; + int bestAnchorId = 0; + int bestAnchorXId = 0; + int bestAnchorYId = 0; + for (int xId = xStart; xId < xEnd; xId++) // Iterate through every box within search diameter + // In our example case, from 3 till 8 + { + const float x = xId * xStep * downscalingFactor + xMin; + // Getting the real world x coordinate + for (int yId = yStart; yId < yEnd; yId++) // Iterate through every box within search diamter in y axis + { + const float y = yId * yStep * downscalingFactor + yMin; + // Get the real world y coordinates + int anchorCount = 0; + for (auto& anchorBox: anchorBoxes) // For every anchor box (4 in our case) + // Note that we are checking every anchor box for every label in the file + { + anchorBox.x = x; // Assign the real world x and y coordinate to the anchor box + anchorBox.y = y; // Note that anchor boxes originally didn't have Xs and Ys. + // This is because we need ot check them along the X-Y grid. + // However, they did have a z value attached to them. + + const float iouOverlap = iou(anchorBox, labelBox); // Get IOU between two 3D boxes. + + if (maxIou < iouOverlap) + { + maxIou = iouOverlap; + bestAnchor = anchorBox; + bestAnchorId = anchorCount; + bestAnchorXId = xId; + bestAnchorYId = yId; + // if(printTime){ + // if(anchorCount == 3){ + // py::print("\nIoU old: " +std::to_string(iouOverlap) + " new: " + std::to_string(maxIou)); + // } + // } + + } + + if (iouOverlap > positiveThreshold) // Accept the Anchor. Add the anchor details to the tensor. + { + // Tensor at CurrentObject Id, xth grid cell, yth grid cell, currentAnchor, 0 + tensor.mutable_at(objectCount, xId, yId, anchorCount, 0) = 1; + + auto diag = anchorDiagonals[anchorCount]; + tensor.mutable_at(objectCount, xId, yId, anchorCount, 1) = (labelBox.x - anchorBox.x) / diag; // delta x,y,z + tensor.mutable_at(objectCount, xId, yId, anchorCount, 2) = (labelBox.y - anchorBox.y) / diag; + tensor.mutable_at(objectCount, xId, yId, anchorCount, 3) = (labelBox.z - anchorBox.z) / anchorBox.height; + + tensor.mutable_at(objectCount, xId, yId, anchorCount, 4) = std::log(labelBox.length / anchorBox.length); // delta l,w,h + tensor.mutable_at(objectCount, xId, yId, anchorCount, 5) = std::log(labelBox.width / anchorBox.width); + tensor.mutable_at(objectCount, xId, yId, anchorCount, 6) = std::log(labelBox.height / anchorBox.height); + + // tensor.mutable_at(objectCount, xId, yId, anchorCount, 7) = std::sin(labelBox.yaw - anchorBox.yaw); //delta yaw + tensor.mutable_at(objectCount, xId, yId, anchorCount, 7) = labelBox.yaw - anchorBox.yaw; //delta yaw + if (labelBox.yaw > 0) // Is yaw > 0 + { + tensor.mutable_at(objectCount, xId, yId, anchorCount, 8) = 1; + } + else + { + tensor.mutable_at(objectCount, xId, yId, anchorCount, 8) = 0; + } + + tensor.mutable_at(objectCount, xId, yId, anchorCount, 9) = labelBox.classId; + + } + else if (iouOverlap < negativeThreshold) + { + tensor.mutable_at(objectCount, xId, yId, anchorCount, 0) = 0; + } + else + { + tensor.mutable_at(objectCount, xId, yId, anchorCount, 0) = -1; + } + + anchorCount++; + } + } + } + + if (maxIou < positiveThreshold) // Comparing maxIOU for that object obtained after checking with every anchor box + // If none of the anchors passed the threshold, then we place the best anchor details for that object. 
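+        // The fallback below writes the same target encoding as the positive-match branch:
+        // (dx, dy) normalised by the anchor diagonal, dz by the anchor height, log size ratios,
+        // the raw yaw difference, a heading bit for yaw > 0, and the class id, but only for the
+        // single best-overlapping anchor found for this object.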
+ { + negCnt++; + // if (printTime) + // { + // // std::cout << "\nThere was no sufficiently overlapping anchor anywhere for object " << objectCount << std::endl; + // py::print("\nThere was no sufficiently overlapping anchor anywhere for object " +std::to_string(objectCount)); + // // std::cout << "Best IOU was " << maxIou << ". Adding the best location regardless of threshold." << std::endl; + // py::print("\nBest IOU was "+std::to_string(maxIou)+" Adding the best location regardless of threshold"); + // py::print("\nBest IOU.x was "+std::to_string(bestAnchor.x)+" "); + // py::print("\nBest IOU.y was "+std::to_string(bestAnchor.y)+" "); + // py::print("\nBest IOU.z was "+std::to_string(bestAnchor.z)+" "); + // py::print("\nBest IOU.ry was "+std::to_string(bestAnchor.yaw)+" "); + // } + + const auto xId = bestAnchorXId; + const auto yId = bestAnchorYId; + // const auto xId = static_cast(std::floor((labelBox.x - xMin) / (xStep * downscalingFactor))); + // const auto yId = static_cast(std::floor((labelBox.y - yMin) / (yStep * downscalingFactor))); + const float diag = std::sqrt(std::pow(bestAnchor.width, 2) + std::pow(bestAnchor.length, 2)); + + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 0) = 1; + + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 1) = (labelBox.x - bestAnchor.x) / diag; + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 2) = (labelBox.y - bestAnchor.y) / diag; + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 3) = (labelBox.z - bestAnchor.z) / bestAnchor.height; + + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 4) = std::log(labelBox.length / bestAnchor.length); + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 5) = std::log(labelBox.width / bestAnchor.width); + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 6) = std::log(labelBox.height / bestAnchor.height); + + // tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 7) = std::sin(labelBox.yaw - bestAnchor.yaw); + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 7) = labelBox.yaw - bestAnchor.yaw; + if (labelBox.yaw > 0) + { + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 8) = 1; + } + else + { + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 8) = 0; + } +// Class id is the classification label (0,1,2,3) + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 9) = labelBox.classId; + } + else + { + posCnt++; + if (printTime) + { + std::cout << "\nAt least 1 anchor was positively matched for object " << objectCount << std::endl; + std::cout << "Best IOU was " << maxIou << "." 
<< std::endl; + } + } + + objectCount++; + } + + std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); + if (printTime) + std::cout << "createPillarsTarget took: " << static_cast<float>(duration) / 1e6 << " seconds" << std::endl; + + return std::make_tuple(tensor, posCnt, negCnt); +} + +float cmath_sin(float value){ + return std::sin(value); +} + + +PYBIND11_MODULE(point_pillars_v2, m) +{ + m.def("createPillars", &createPillars, "Runs function to create point pillars input tensors"); + m.def("createPillarsTarget", &createPillarsTarget, "Runs function to create point pillars output ground truth"); + m.def("cmath_sin", &cmath_sin, "Runs function to compute sine"); +} diff --git a/viz.py b/viz.py new file mode 100644 index 0000000..e69de29 From a8aa0b5b00e66e692a74dcdb343e7507bca14181 Mon Sep 17 00:00:00 2001 From: tjtanaa Date: Fri, 20 Nov 2020 23:16:37 +0800 Subject: [PATCH 2/4] Replicate Original Model Architecture from Point Pillar Official Source Code --- README.md | 7 + config_v2.py | 73 ++- config_v2_2.py | 155 ++++++ loss.py | 14 +- loss_v2.py | 87 +++ loss_v2_2.py | 87 +++ network.py | 5 +- network_v2.py | 121 +++++ network_v2_2.py | 121 +++++ point_pillars_check_input.py | 148 +++++ point_pillars_custom_prediction.py | 96 ++-- point_pillars_custom_prediction_v2_2.py | 165 ++++++ point_pillars_custom_processors_v2.py | 38 +- point_pillars_custom_processors_v2_2.py | 380 +++++++++++++ point_pillars_training_custom_run_v2.py | 7 +- point_pillars_training_custom_run_v2_2.py | 87 +++ point_pillars_visualize_input.py | 4 +- src/point_pillars.cpp | 107 +++- src/point_pillars_v2.cpp | 634 ---------------------- 19 files changed, 1590 insertions(+), 746 deletions(-) create mode 100644 config_v2_2.py create mode 100644 loss_v2.py create mode 100644 loss_v2_2.py create mode 100644 network_v2.py create mode 100644 network_v2_2.py create mode 100644 point_pillars_check_input.py create mode 100644 point_pillars_custom_prediction_v2_2.py create mode 100644 point_pillars_custom_processors_v2_2.py create mode 100644 point_pillars_training_custom_run_v2_2.py delete mode 100644 src/point_pillars_v2.cpp diff --git a/README.md b/README.md index 54c2ba0..2e415ef 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,10 @@ +# Forked from (https://github.com/tyagi-iiitv/PointPillars.git) [Under Development] + +The files with the v2_2 suffix are scripts that do not perform class regression. +They only learn to detect a single class. The confidence of the objectness output is used to determine +whether an object of that class is present or not. + + # About Point Pillars Point Pillars is a very famous Deep Neural Network for 3D Object Detection for LiDAR point clouds. With the application of object detection on the LiDAR devices fitted in the self driving cars, Point Pillars focuse on fast inference ~50fps, which was magnitudes above as compared to other networks for 3D Object detection. In this repo, we are trying to develop point pillars in TensorFlow. [Here's](https://medium.com/@a_tyagi/pointpillars-3d-point-clouds-bounding-box-detection-and-tracking-pointnet-pointnet-lasernet-67e26116de5a?source=friends_link&sk=4a27f55f2cea645af39f72117984fd22) a good first post to familiarize yourself with Point Pillars.
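The v2_2 note above amounts to a simple decision rule at inference time: threshold the occupancy (objectness) map and treat every surviving anchor cell as an instance of the one class the model was trained for. Below is a minimal sketch of that rule, assuming a single sample's occupancy output of shape (H, W, nb_anchors) as produced by the sigmoid occupancy head in network_v2_2.py; the helper name decode_single_class is illustrative and not part of this patch.

    import numpy as np

    def decode_single_class(occupancy, occ_threshold=0.5, class_name="Car"):
        # occupancy: (H, W, nb_anchors) sigmoid scores from the occupancy head.
        # Every cell/anchor above the threshold counts as one detection of the single
        # trained class; there is no per-class score because the clf head was removed.
        ii, jj, aa = np.where(occupancy > occ_threshold)
        return [
            {"class": class_name, "score": float(occupancy[i, j, a]),
             "cell": (int(i), int(j)), "anchor": int(a)}
            for i, j, a in zip(ii, jj, aa)
        ]

The box geometry itself would still come from the loc, size, angle and heading heads decoded against the anchors, for example via the existing generate_bboxes_from_pred_and_np_array helper; only the class decision changes.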
diff --git a/config_v2.py b/config_v2.py index b477acf..5ad41a0 100644 --- a/config_v2.py +++ b/config_v2.py @@ -37,21 +37,41 @@ class DataParameters: # "Misc": 3, # } + # for Car and Pedestrian + # map_classes = { + # 0: "Car", + # 1: "Pedestrian" + # } + + # classes_map = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # # "Cyclist": 2, + # # "Truck": 3, + # # "Van": 3, + # # "Tram": 3, + # # "Misc": 3, + # } + + + # for Car only map_classes = { - 0: "Car", - 1: "Pedestrian" + 0: "Car" } - classes_map = {"Car": 0, - "Pedestrian": 1, - "Person_sitting": 1, - # "Cyclist": 2, - # "Truck": 3, - # "Van": 3, - # "Tram": 3, - # "Misc": 3, + classes_map = {"Car": 0 } + # # for Pedestrian only + # map_classes = { + # 0: "Pedestrian" + # } + + # classes_map = { + # "Pedestrian": 0, + # "Person_sitting": 0, + # } + nb_classes = len(np.unique(list(classes_map.values()))) assert nb_classes == np.max(np.unique(list(classes_map.values()))) + 1, 'Starting class indexing at zero.' @@ -81,19 +101,36 @@ class NetworkParameters: downscaling_factor = 2 # length (x), width (y), height (z), z-center, orientation + # for car and pedestrian + # anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], + # [3.9, 1.6, 1.56, -1, np.pi/2], + # [0.8, 0.6, 1.73, -0.6, 0], + # [0.8, 0.6, 1.73, -0.6, np.pi/2], + # ], dtype=np.float32).tolist() + + # for car only anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], - [3.9, 1.6, 1.56, -1, np.pi/2], - [0.8, 0.6, 1.73, -0.6, 0], - [0.8, 0.6, 1.73, -0.6, np.pi/2], - ], dtype=np.float32).tolist() - nb_dims = 3 + [3.9, 1.6, 1.56, -1, np.pi/2]], dtype=np.float32).tolist() + # for pedestrian only + # anchor_dims = np.array([[0.8, 0.6, 1.73, -0.6, 0], + # [0.8, 0.6, 1.73, -0.6, np.pi/2], + # ], dtype=np.float32).tolist() + nb_dims = 3 + + # for car positive_iou_threshold = 0.6 negative_iou_threshold = 0.3 + + # for pedestrian + # positive_iou_threshold = 0.5 + # negative_iou_threshold = 0.35 + # batch_size = 1 batch_size = 4 total_training_epochs = 160 - iters_to_decay = 101040. # 15 * 4 * ceil(6733. / 4) --> every 15 epochs on 6733 kitti samples, cf. pillar paper + # iters_to_decay = 101040. # 15 * 4 * ceil(6733. / 4) --> every 15 epochs on 6733 kitti samples, cf. 
pillar paper + iters_to_decay = 100500 learning_rate = 2e-4 decay_rate = 1e-8 L1 = 0 @@ -101,10 +138,10 @@ class NetworkParameters: alpha = 0.25 gamma = 2.0 # original pillars paper values - focal_weight = 3.0 # 1.0 + focal_weight = 1.0 # 1.0 loc_weight = 2.0 # 2.0 size_weight = 2.0 # 2.0 - angle_weight = 1.0 # 2.0 + angle_weight = 2.0 # 2.0 heading_weight = 0.2 # 0.2 class_weight = 0.5 # 0.2 diff --git a/config_v2_2.py b/config_v2_2.py new file mode 100644 index 0000000..5ad41a0 --- /dev/null +++ b/config_v2_2.py @@ -0,0 +1,155 @@ +import numpy as np + + +class GridParameters: + x_min = 0.0 + x_max = 80.64 + x_step = 0.16 + + y_min = -40.32 + y_max = 40.32 + y_step = 0.16 + + # z_min = -1.0 + # z_max = 3.0 + z_min = -3.0 + z_max = 1.0 + + # derived parameters + Xn_f = float(x_max - x_min) / x_step + Yn_f = float(y_max - y_min) / y_step + Xn = int(Xn_f) + Yn = int(Yn_f) + + def __init__(self, **kwargs): + super(GridParameters, self).__init__(**kwargs) + + +class DataParameters: + + # classes_map = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + # for Car and Pedestrian + # map_classes = { + # 0: "Car", + # 1: "Pedestrian" + # } + + # classes_map = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # # "Cyclist": 2, + # # "Truck": 3, + # # "Van": 3, + # # "Tram": 3, + # # "Misc": 3, + # } + + + # for Car only + map_classes = { + 0: "Car" + } + + classes_map = {"Car": 0 + } + + # # for Pedestrian only + # map_classes = { + # 0: "Pedestrian" + # } + + # classes_map = { + # "Pedestrian": 0, + # "Person_sitting": 0, + # } + + nb_classes = len(np.unique(list(classes_map.values()))) + assert nb_classes == np.max(np.unique(list(classes_map.values()))) + 1, 'Starting class indexing at zero.' + + # classes = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + # nb_classes = len(np.unique(list(classes.values()))) + # assert nb_classes == np.max(np.unique(list(classes.values()))) + 1, 'Starting class indexing at zero.' + + def __init__(self, **kwargs): + super(DataParameters, self).__init__(**kwargs) + + +class NetworkParameters: + + max_points_per_pillar = 100 + max_pillars = 12000 + nb_features = 9 + nb_channels = 64 + downscaling_factor = 2 + + # length (x), width (y), height (z), z-center, orientation + # for car and pedestrian + # anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], + # [3.9, 1.6, 1.56, -1, np.pi/2], + # [0.8, 0.6, 1.73, -0.6, 0], + # [0.8, 0.6, 1.73, -0.6, np.pi/2], + # ], dtype=np.float32).tolist() + + # for car only + anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], + [3.9, 1.6, 1.56, -1, np.pi/2]], dtype=np.float32).tolist() + + # for pedestrian only + # anchor_dims = np.array([[0.8, 0.6, 1.73, -0.6, 0], + # [0.8, 0.6, 1.73, -0.6, np.pi/2], + # ], dtype=np.float32).tolist() + nb_dims = 3 + + # for car + positive_iou_threshold = 0.6 + negative_iou_threshold = 0.3 + + # for pedestrian + # positive_iou_threshold = 0.5 + # negative_iou_threshold = 0.35 + + # batch_size = 1 + batch_size = 4 + total_training_epochs = 160 + # iters_to_decay = 101040. # 15 * 4 * ceil(6733. / 4) --> every 15 epochs on 6733 kitti samples, cf. 
pillar paper + iters_to_decay = 100500 + learning_rate = 2e-4 + decay_rate = 1e-8 + L1 = 0 + L2 = 0 + alpha = 0.25 + gamma = 2.0 + # original pillars paper values + focal_weight = 1.0 # 1.0 + loc_weight = 2.0 # 2.0 + size_weight = 2.0 # 2.0 + angle_weight = 2.0 # 2.0 + heading_weight = 0.2 # 0.2 + class_weight = 0.5 # 0.2 + + def __init__(self, **kwargs): + super(NetworkParameters, self).__init__(**kwargs) + + +class Parameters(GridParameters, DataParameters, NetworkParameters): + + def __init__(self, **kwargs): + super(Parameters, self).__init__(**kwargs) diff --git a/loss.py b/loss.py index bd9aa21..092064c 100644 --- a/loss.py +++ b/loss.py @@ -1,7 +1,7 @@ import tensorflow as tf import tensorflow_probability as tfp from tensorflow.python.keras import backend as K -from config import Parameters +from config_v2 import Parameters class PointPillarNetworkLoss: @@ -47,6 +47,7 @@ def loc_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): mask = tf.tile(tf.expand_dims(self.mask, -1), [1, 1, 1, 1, 3]) loss = tf.compat.v1.losses.huber_loss(y_true, y_pred, + delta=3.0, reduction="none") masked_loss = tf.boolean_mask(loss, mask) @@ -56,13 +57,12 @@ def size_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): mask = tf.tile(tf.expand_dims(self.mask, -1), [1, 1, 1, 1, 3]) loss = tf.compat.v1.losses.huber_loss(y_true, y_pred, + delta=3.0, reduction="none") masked_loss = tf.boolean_mask(loss, mask) return self.size_weight * tf.reduce_mean(masked_loss) - - def add_sin_difference(self, y_true, y_pred, factor=1.0): if factor != 1.0: y_true = factor * y_true @@ -75,6 +75,7 @@ def angle_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): y_true, y_pred = self.add_sin_difference(y_true, y_pred, self.angle_weight) loss = tf.compat.v1.losses.huber_loss(y_true, y_pred, + delta=3.0, reduction="none") masked_loss = tf.boolean_mask(loss, self.mask) @@ -86,6 +87,7 @@ def heading_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): return self.heading_weight * tf.reduce_mean(masked_loss) def class_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): - loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred) - masked_loss = tf.boolean_mask(loss, self.mask) - return self.class_weight * tf.reduce_mean(masked_loss) + return 0 # for now since we are only learning one class, the class label is equivalent to the occupancy + # loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred) + # masked_loss = tf.boolean_mask(loss, self.mask) + # return self.class_weight * tf.reduce_mean(masked_loss) diff --git a/loss_v2.py b/loss_v2.py new file mode 100644 index 0000000..6f272dd --- /dev/null +++ b/loss_v2.py @@ -0,0 +1,87 @@ +import tensorflow as tf +import tensorflow_probability as tfp +from tensorflow.python.keras import backend as K +from config_v2 import Parameters + + +class PointPillarNetworkLoss: + + def __init__(self, params: Parameters): + self.alpha = float(params.alpha) + self.gamma = float(params.gamma) + self.focal_weight = float(params.focal_weight) + self.loc_weight = float(params.loc_weight) + self.size_weight = float(params.size_weight) + self.angle_weight = float(params.angle_weight) + self.heading_weight = float(params.heading_weight) + # self.class_weight = float(params.class_weight) + + def losses(self): + return [self.focal_loss, self.loc_loss, self.size_loss, self.angle_loss, self.heading_loss] + + def focal_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + """ y_true value from occ in {-1, 0, 1}, i.e. 
{bad match, neg box, pos box} """ + + self.mask = tf.equal(y_true, 1) + + cross_entropy = K.binary_crossentropy(y_true, y_pred) + + p_t = y_true * y_pred + (tf.subtract(1.0, y_true) * tf.subtract(1.0, y_pred)) + + gamma_factor = tf.pow(1.0 - p_t, self.gamma) + + alpha_factor = y_true * self.alpha + (1.0 - y_true) * (1.0 - self.alpha) + + focal_loss = gamma_factor * alpha_factor * cross_entropy + + neg_mask = tf.equal(y_true, 0) + thr = tfp.stats.percentile(tf.boolean_mask(focal_loss, neg_mask), 90.) + hard_neg_mask = tf.greater(focal_loss, thr) + # mask = tf.logical_or(tf.equal(y_true, 0), tf.equal(y_true, 1)) + mask = tf.logical_or(self.mask, tf.logical_and(neg_mask, hard_neg_mask)) + masked_loss = tf.boolean_mask(focal_loss, mask) + + return self.focal_weight * tf.reduce_mean(masked_loss) + + def loc_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + mask = tf.tile(tf.expand_dims(self.mask, -1), [1, 1, 1, 1, 3]) + loss = tf.compat.v1.losses.huber_loss(y_true, + y_pred, + delta=3.0, + reduction="none") + + masked_loss = tf.boolean_mask(loss, mask) + return self.loc_weight * tf.reduce_mean(masked_loss) + + def size_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + mask = tf.tile(tf.expand_dims(self.mask, -1), [1, 1, 1, 1, 3]) + loss = tf.compat.v1.losses.huber_loss(y_true, + y_pred, + delta=3.0, + reduction="none") + + masked_loss = tf.boolean_mask(loss, mask) + return self.size_weight * tf.reduce_mean(masked_loss) + + def add_sin_difference(self, y_true, y_pred, factor=1.0): + if factor != 1.0: + y_true = factor * y_true + y_pred = factor * y_pred + rad_pred_encoding = tf.math.sin(y_pred) * tf.math.cos(y_true) + rad_tg_encoding = tf.math.cos(y_pred) * tf.math.sin(y_true) + return rad_tg_encoding, rad_pred_encoding + + def angle_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + y_true, y_pred = self.add_sin_difference(y_true, y_pred) + loss = tf.compat.v1.losses.huber_loss(y_true, + y_pred, + delta=3.0, + reduction="none") + + masked_loss = tf.boolean_mask(loss, self.mask) + return self.angle_weight * tf.reduce_mean(masked_loss) + + def heading_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + loss = K.binary_crossentropy(y_true, y_pred) + masked_loss = tf.boolean_mask(loss, self.mask) + return self.heading_weight * tf.reduce_mean(masked_loss) \ No newline at end of file diff --git a/loss_v2_2.py b/loss_v2_2.py new file mode 100644 index 0000000..611a7ce --- /dev/null +++ b/loss_v2_2.py @@ -0,0 +1,87 @@ +import tensorflow as tf +import tensorflow_probability as tfp +from tensorflow.python.keras import backend as K +from config_v2_2 import Parameters + + +class PointPillarNetworkLoss: + + def __init__(self, params: Parameters): + self.alpha = float(params.alpha) + self.gamma = float(params.gamma) + self.focal_weight = float(params.focal_weight) + self.loc_weight = float(params.loc_weight) + self.size_weight = float(params.size_weight) + self.angle_weight = float(params.angle_weight) + self.heading_weight = float(params.heading_weight) + # self.class_weight = float(params.class_weight) + + def losses(self): + return [self.focal_loss, self.loc_loss, self.size_loss, self.angle_loss, self.heading_loss] + + def focal_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + """ y_true value from occ in {-1, 0, 1}, i.e. 
{bad match, neg box, pos box} """ + + self.mask = tf.equal(y_true, 1) + + cross_entropy = K.binary_crossentropy(y_true, y_pred) + + p_t = y_true * y_pred + (tf.subtract(1.0, y_true) * tf.subtract(1.0, y_pred)) + + gamma_factor = tf.pow(1.0 - p_t, self.gamma) + + alpha_factor = y_true * self.alpha + (1.0 - y_true) * (1.0 - self.alpha) + + focal_loss = gamma_factor * alpha_factor * cross_entropy + + neg_mask = tf.equal(y_true, 0) + thr = tfp.stats.percentile(tf.boolean_mask(focal_loss, neg_mask), 90.) + hard_neg_mask = tf.greater(focal_loss, thr) + # mask = tf.logical_or(tf.equal(y_true, 0), tf.equal(y_true, 1)) + mask = tf.logical_or(self.mask, tf.logical_and(neg_mask, hard_neg_mask)) + masked_loss = tf.boolean_mask(focal_loss, mask) + + return self.focal_weight * tf.reduce_mean(masked_loss) + + def loc_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + mask = tf.tile(tf.expand_dims(self.mask, -1), [1, 1, 1, 1, 3]) + loss = tf.compat.v1.losses.huber_loss(y_true, + y_pred, + delta=3.0, + reduction="none") + + masked_loss = tf.boolean_mask(loss, mask) + return self.loc_weight * tf.reduce_mean(masked_loss) + + def size_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + mask = tf.tile(tf.expand_dims(self.mask, -1), [1, 1, 1, 1, 3]) + loss = tf.compat.v1.losses.huber_loss(y_true, + y_pred, + delta=3.0, + reduction="none") + + masked_loss = tf.boolean_mask(loss, mask) + return self.size_weight * tf.reduce_mean(masked_loss) + + def add_sin_difference(self, y_true, y_pred, factor=1.0): + if factor != 1.0: + y_true = factor * y_true + y_pred = factor * y_pred + rad_pred_encoding = tf.math.sin(y_pred) * tf.math.cos(y_true) + rad_tg_encoding = tf.math.cos(y_pred) * tf.math.sin(y_true) + return rad_tg_encoding, rad_pred_encoding + + def angle_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + y_true, y_pred = self.add_sin_difference(y_true, y_pred) + loss = tf.compat.v1.losses.huber_loss(y_true, + y_pred, + delta=3.0, + reduction="none") + + masked_loss = tf.boolean_mask(loss, self.mask) + return self.angle_weight * tf.reduce_mean(masked_loss) + + def heading_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor): + loss = K.binary_crossentropy(y_true, y_pred) + masked_loss = tf.boolean_mask(loss, self.mask) + return self.heading_weight * tf.reduce_mean(masked_loss) \ No newline at end of file diff --git a/network.py b/network.py index 3d697eb..1404751 100644 --- a/network.py +++ b/network.py @@ -1,7 +1,6 @@ import tensorflow as tf import numpy as np -# from config import Parameters -from config_v2 import Parameters +from config import Parameters def build_point_pillar_graph(params: Parameters): @@ -116,4 +115,4 @@ def correct_batch_indices(tensor, batch_size): pillar_net = tf.keras.models.Model([input_pillars, input_indices], [occ, loc, size, angle, heading, clf]) # print(pillar_net.summary()) - return pillar_net + return pillar_net \ No newline at end of file diff --git a/network_v2.py b/network_v2.py new file mode 100644 index 0000000..4e1deed --- /dev/null +++ b/network_v2.py @@ -0,0 +1,121 @@ +import tensorflow as tf +import numpy as np +# from config import Parameters +from config_v2 import Parameters + + +def build_point_pillar_graph(params: Parameters): + + # extract required parameters + max_pillars = int(params.max_pillars) + max_points = int(params.max_points_per_pillar) + nb_features = int(params.nb_features) + nb_channels = int(params.nb_channels) + batch_size = int(params.batch_size) + image_size = tuple([params.Xn, params.Yn]) + nb_classes = int(params.nb_classes) + nb_anchors = 
len(params.anchor_dims) + + if tf.keras.backend.image_data_format() == "channels_first": + raise NotImplementedError + else: + input_shape = (max_pillars, max_points, nb_features) + + input_pillars = tf.keras.layers.Input(input_shape, batch_size=batch_size, name="pillars/input") + input_indices = tf.keras.layers.Input((max_pillars, 3), batch_size=batch_size, name="pillars/indices", + dtype=tf.int32) + + def correct_batch_indices(tensor, batch_size): + array = np.zeros((batch_size, max_pillars, 3), dtype=np.float32) + for i in range(batch_size): + array[i, :, 0] = i + return tensor + tf.constant(array, dtype=tf.int32) + + if batch_size > 1: + corrected_indices = tf.keras.layers.Lambda(lambda t: correct_batch_indices(t, batch_size))(input_indices) + else: + corrected_indices = input_indices + + # pillars + x = tf.keras.layers.Conv2D(nb_channels, (1, 1), activation='relu', use_bias=False, name="pillars/conv2d")(input_pillars) + x = tf.keras.layers.BatchNormalization(name="pillars/batchnorm", fused=True, epsilon=1e-3, momentum=0.01)(x) + x = tf.keras.layers.MaxPool2D((1, max_points), name="pillars/maxpooling2d")(x) + + if tf.keras.backend.image_data_format() == "channels_first": + reshape_shape = (nb_channels, max_pillars) + else: + reshape_shape = (max_pillars, nb_channels) + + x = tf.keras.layers.Reshape(reshape_shape, name="pillars/reshape")(x) + pillars = tf.keras.layers.Lambda(lambda inp: tf.scatter_nd(inp[0], inp[1], + (batch_size,) + image_size + (nb_channels,)), + name="pillars/scatter_nd")([corrected_indices, x]) + + # 2d cnn backbone + + # # Block1(S, 4, C) + # Block1(S, 4, C) + x = pillars + for n in range(4): + S = (2, 2) if n == 0 else (1, 1) + # # S = (1, 1) # pedestrian + x = tf.keras.layers.Conv2D(nb_channels, (3, 3), strides=S, padding="same", activation="relu", + name="cnn/block1/conv2d%i" % n)(x) + x = tf.keras.layers.BatchNormalization(name="cnn/block1/bn%i" % n, fused=True, epsilon=1e-3, momentum=0.01)(x) + x1 = x + + # Block2(2S, 6, 2C) + for n in range(6): + S = (2, 2) if n == 0 else (1, 1) + x = tf.keras.layers.Conv2D(2 * nb_channels, (3, 3), strides=S, padding="same", activation="relu", + name="cnn/block2/conv2d%i" % n)(x) + x = tf.keras.layers.BatchNormalization(name="cnn/block2/bn%i" % n, fused=True, epsilon=1e-3, momentum=0.01)(x) + x2 = x + + # Block3(4S, 6, 4C) + for n in range(6): + S = (2, 2) if n == 0 else (1, 1) + x = tf.keras.layers.Conv2D(4 * nb_channels, (3, 3), strides=S, padding="same", activation="relu", + name="cnn/block3/conv2d%i" % n)(x) + x = tf.keras.layers.BatchNormalization(name="cnn/block3/bn%i" % n, fused=True, epsilon=1e-3, momentum=0.01)(x) + x3 = x + + + # Up1 (S, S, 2C) + up1 = tf.keras.layers.Conv2DTranspose(2 * nb_channels, (1, 1), strides=(1, 1), padding="same", activation="relu", + name="cnn/up1/conv2dt")(x1) + up1 = tf.keras.layers.BatchNormalization(name="cnn/up1/bn", fused=True, epsilon=1e-3, momentum=0.01)(up1) + + # Up2 (2S, S, 2C) + up2 = tf.keras.layers.Conv2DTranspose(2 * nb_channels, (2, 2), strides=(2, 2), padding="same", activation="relu", + name="cnn/up2/conv2dt")(x2) + up2 = tf.keras.layers.BatchNormalization(name="cnn/up2/bn", fused=True, epsilon=1e-3, momentum=0.01)(up2) + + # Up3 (4S, S, 2C) + up3 = tf.keras.layers.Conv2DTranspose(2 * nb_channels, (4, 4), strides=(4, 4), padding="same", activation="relu", + name="cnn/up3/conv2dt")(x3) + up3 = tf.keras.layers.BatchNormalization(name="cnn/up3/bn", fused=True, epsilon=1e-3, momentum=0.01)(up3) + + # Concat + concat = tf.keras.layers.Concatenate(name="cnn/concatenate")([up1, 
up2, up3]) + + # Detection head + occ = tf.keras.layers.Conv2D(nb_anchors, (1, 1), name="occupancy/conv2d", activation="sigmoid")(concat) + + loc = tf.keras.layers.Conv2D(nb_anchors * 3, (1, 1), name="loc/conv2d", kernel_initializer=tf.keras.initializers.TruncatedNormal(0, 0.001))(concat) + loc = tf.keras.layers.Reshape(tuple(i//2 for i in image_size) + (nb_anchors, 3), name="loc/reshape")(loc) + + size = tf.keras.layers.Conv2D(nb_anchors * 3, (1, 1), name="size/conv2d", kernel_initializer=tf.keras.initializers.TruncatedNormal(0, 0.001))(concat) + size = tf.keras.layers.Reshape(tuple(i//2 for i in image_size) + (nb_anchors, 3), name="size/reshape")(size) + + angle = tf.keras.layers.Conv2D(nb_anchors, (1, 1), name="angle/conv2d")(concat) + + heading = tf.keras.layers.Conv2D(nb_anchors, (1, 1), name="heading/conv2d", activation="sigmoid")(concat) + + # clf = tf.keras.layers.Conv2D(nb_anchors * nb_classes, (1, 1), name="clf/conv2d")(concat) + # clf = tf.keras.layers.Reshape(tuple(i // 2 for i in image_size) + (nb_anchors, nb_classes), name="clf/reshape")(clf) + + pillar_net = tf.keras.models.Model([input_pillars, input_indices], [occ, loc, size, angle, heading]) +# print(pillar_net.summary()) + + return pillar_net diff --git a/network_v2_2.py b/network_v2_2.py new file mode 100644 index 0000000..f30b6aa --- /dev/null +++ b/network_v2_2.py @@ -0,0 +1,121 @@ +import tensorflow as tf +import numpy as np +# from config import Parameters +from config_v2_2 import Parameters + + +def build_point_pillar_graph(params: Parameters): + + # extract required parameters + max_pillars = int(params.max_pillars) + max_points = int(params.max_points_per_pillar) + nb_features = int(params.nb_features) + nb_channels = int(params.nb_channels) + batch_size = int(params.batch_size) + image_size = tuple([params.Xn, params.Yn]) + nb_classes = int(params.nb_classes) + nb_anchors = len(params.anchor_dims) + + if tf.keras.backend.image_data_format() == "channels_first": + raise NotImplementedError + else: + input_shape = (max_pillars, max_points, nb_features) + + input_pillars = tf.keras.layers.Input(input_shape, batch_size=batch_size, name="pillars/input") + input_indices = tf.keras.layers.Input((max_pillars, 3), batch_size=batch_size, name="pillars/indices", + dtype=tf.int32) + + def correct_batch_indices(tensor, batch_size): + array = np.zeros((batch_size, max_pillars, 3), dtype=np.float32) + for i in range(batch_size): + array[i, :, 0] = i + return tensor + tf.constant(array, dtype=tf.int32) + + if batch_size > 1: + corrected_indices = tf.keras.layers.Lambda(lambda t: correct_batch_indices(t, batch_size))(input_indices) + else: + corrected_indices = input_indices + + # pillars + x = tf.keras.layers.Conv2D(nb_channels, (1, 1), activation='relu', use_bias=False, name="pillars/conv2d")(input_pillars) + x = tf.keras.layers.BatchNormalization(name="pillars/batchnorm", fused=True, epsilon=1e-3, momentum=0.01)(x) + x = tf.keras.layers.MaxPool2D((1, max_points), name="pillars/maxpooling2d")(x) + + if tf.keras.backend.image_data_format() == "channels_first": + reshape_shape = (nb_channels, max_pillars) + else: + reshape_shape = (max_pillars, nb_channels) + + x = tf.keras.layers.Reshape(reshape_shape, name="pillars/reshape")(x) + pillars = tf.keras.layers.Lambda(lambda inp: tf.scatter_nd(inp[0], inp[1], + (batch_size,) + image_size + (nb_channels,)), + name="pillars/scatter_nd")([corrected_indices, x]) + + # 2d cnn backbone + + # # Block1(S, 4, C) + # Block1(S, 4, C) + x = pillars + for n in range(4): + S = (2, 2) if n == 0 
else (1, 1) + # # S = (1, 1) # pedestrian + x = tf.keras.layers.Conv2D(nb_channels, (3, 3), strides=S, padding="same", activation="relu", + name="cnn/block1/conv2d%i" % n)(x) + x = tf.keras.layers.BatchNormalization(name="cnn/block1/bn%i" % n, fused=True, epsilon=1e-3, momentum=0.01)(x) + x1 = x + + # Block2(2S, 6, 2C) + for n in range(6): + S = (2, 2) if n == 0 else (1, 1) + x = tf.keras.layers.Conv2D(2 * nb_channels, (3, 3), strides=S, padding="same", activation="relu", + name="cnn/block2/conv2d%i" % n)(x) + x = tf.keras.layers.BatchNormalization(name="cnn/block2/bn%i" % n, fused=True, epsilon=1e-3, momentum=0.01)(x) + x2 = x + + # Block3(4S, 6, 4C) + for n in range(6): + S = (2, 2) if n == 0 else (1, 1) + x = tf.keras.layers.Conv2D(4 * nb_channels, (3, 3), strides=S, padding="same", activation="relu", + name="cnn/block3/conv2d%i" % n)(x) + x = tf.keras.layers.BatchNormalization(name="cnn/block3/bn%i" % n, fused=True, epsilon=1e-3, momentum=0.01)(x) + x3 = x + + + # Up1 (S, S, 2C) + up1 = tf.keras.layers.Conv2DTranspose(2 * nb_channels, (1, 1), strides=(1, 1), padding="same", activation="relu", + name="cnn/up1/conv2dt")(x1) + up1 = tf.keras.layers.BatchNormalization(name="cnn/up1/bn", fused=True, epsilon=1e-3, momentum=0.01)(up1) + + # Up2 (2S, S, 2C) + up2 = tf.keras.layers.Conv2DTranspose(2 * nb_channels, (2, 2), strides=(2, 2), padding="same", activation="relu", + name="cnn/up2/conv2dt")(x2) + up2 = tf.keras.layers.BatchNormalization(name="cnn/up2/bn", fused=True, epsilon=1e-3, momentum=0.01)(up2) + + # Up3 (4S, S, 2C) + up3 = tf.keras.layers.Conv2DTranspose(2 * nb_channels, (4, 4), strides=(4, 4), padding="same", activation="relu", + name="cnn/up3/conv2dt")(x3) + up3 = tf.keras.layers.BatchNormalization(name="cnn/up3/bn", fused=True, epsilon=1e-3, momentum=0.01)(up3) + + # Concat + concat = tf.keras.layers.Concatenate(name="cnn/concatenate")([up1, up2, up3]) + + # Detection head + occ = tf.keras.layers.Conv2D(nb_anchors, (1, 1), name="occupancy/conv2d", activation="sigmoid")(concat) + + loc = tf.keras.layers.Conv2D(nb_anchors * 3, (1, 1), name="loc/conv2d", kernel_initializer=tf.keras.initializers.TruncatedNormal(0, 0.001))(concat) + loc = tf.keras.layers.Reshape(tuple(i//2 for i in image_size) + (nb_anchors, 3), name="loc/reshape")(loc) + + size = tf.keras.layers.Conv2D(nb_anchors * 3, (1, 1), name="size/conv2d", kernel_initializer=tf.keras.initializers.TruncatedNormal(0, 0.001))(concat) + size = tf.keras.layers.Reshape(tuple(i//2 for i in image_size) + (nb_anchors, 3), name="size/reshape")(size) + + angle = tf.keras.layers.Conv2D(nb_anchors, (1, 1), name="angle/conv2d")(concat) + + heading = tf.keras.layers.Conv2D(nb_anchors, (1, 1), name="heading/conv2d", activation="sigmoid")(concat) + + # clf = tf.keras.layers.Conv2D(nb_anchors * nb_classes, (1, 1), name="clf/conv2d")(concat) + # clf = tf.keras.layers.Reshape(tuple(i // 2 for i in image_size) + (nb_anchors, nb_classes), name="clf/reshape")(clf) + + pillar_net = tf.keras.models.Model([input_pillars, input_indices], [occ, loc, size, angle, heading]) +# print(pillar_net.summary()) + + return pillar_net diff --git a/point_pillars_check_input.py b/point_pillars_check_input.py new file mode 100644 index 0000000..8ab59f6 --- /dev/null +++ b/point_pillars_check_input.py @@ -0,0 +1,148 @@ +import os +from glob import glob +import numpy as np +import tensorflow as tf +# from processors import SimpleDataGenerator +# from custom_processors import CustomDataGenerator, AnalyseCustomDataGenerator +from point_pillars_custom_processors_v2 import 
CustomDataGenerator, AnalyseCustomDataGenerator +from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator, focal_loss_checker +from inference_utils_v2 import rotational_nms, generate_bboxes_from_pred_and_np_array +from readers import KittiDataReader +from config_v2 import Parameters +from network import build_point_pillar_graph + + +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" +# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" +MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_Input_Coordinate_Analysis_v2" + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "2" + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +if __name__ == "__main__": + + params = Parameters() + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_gt_only" + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_labels_only" + save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/pedestrian_input_coordinate_analysis_point_pillar_v2_gt_and_labels" + # Initialize and setup output directory. + Converter = PointvizConverter(save_viz_path) + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + + validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + npoints=16384, split='train_val_test',random_select=False, classes=list(params.classes_map.keys())) + + for sample_id in validation_gen.sample_id_list: + print(sample_id) + + # for batch_idx in range(0,20): + # [pillars, voxels], [occupancy_, position_, size_, angle_, heading_, classification_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] + + + # set_boxes, confidences = [], [] + # loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 + # for i in range(loop_range): + # # set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + # # heading[i], + # # classification[i], params.anchor_dims, occ_threshold=0.15) + # gt_set_box, decoded_gt_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy_[i], position_[i], size_[i], angle_[i], + # heading_[i], + # classification_[i], params.anchor_dims, occ_threshold=0.5) + + # # exit() + # gt_boxes3d_ = gt_boxes3d[i] + + # print(gt_boxes3d_.shape) + # gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], + # gt_boxes3d_[:,1], gt_boxes3d_[:,2] , + # gt_boxes3d_[:,0], + # gt_boxes3d_[:,6]], axis=1) + + + # gt_bbox_params_list = gt_bbox_params.tolist() + # # gt_bbox_params_list = [] + # # print(gt_bbox_params_list) + # # print(len(gt_bbox_params_list)) + # # print(len(gt_bbox_params_list[0])) + + # for k in range(len(gt_bbox_params_list)): + # msg = "%.5f, %.5f"%(gt_bbox_params_list[k][3],gt_bbox_params_list[k][5]) + # gt_bbox_params_list[k].append("Green") + # gt_bbox_params_list[k].append(msg) + + # if len(gt_set_box) > 0: + # decoded_gt_boxes3d_ = decoded_gt_boxes3d + # # bbox_params = validation_gen.convert_predictions_into_point_viz_format(predicted_boxes3d[:,[1, 2, 0, 5, 3, 4, 6 ]]) + + # print(decoded_gt_boxes3d_.shape) + # # print(predicted_boxes3d_) + # # print(size[i]) + + # bbox_params = np.stack([decoded_gt_boxes3d_[:,3], decoded_gt_boxes3d_[:,5], 
decoded_gt_boxes3d_[:,4], + # decoded_gt_boxes3d_[:,1], decoded_gt_boxes3d_[:,2] , + # decoded_gt_boxes3d_[:,0], + # decoded_gt_boxes3d_[:,6]], axis=1) + + + # # bbox_params = np.stack([predicted_boxes3d[:,4], predicted_boxes3d[:,5], predicted_boxes3d[:,3], + # # predicted_boxes3d[:,1], -(predicted_boxes3d[:,2] - predicted_boxes3d[:,5] / 2), + # # predicted_boxes3d[:,0], + # # predicted_boxes3d[:,6]], axis=1) + + # bbox_params_list = bbox_params.tolist() + # # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + # for k in range(decoded_gt_boxes3d.shape[0]): + # msg = "%.5f, %.5f"%(bbox_params_list[k][3],bbox_params_list[k][5]) + # # msg = (str(bbox_params_list[k][3:5])) + # bbox_params_list[k].append("Magenta") + # bbox_params_list[k].append(msg) + # # bbox_params_list[k].append(str(decoded_gt_boxes3d[k,9]) + params.map_classes[int(decoded_gt_boxes3d[k,8])]) + # gt_bbox_params_list.append(bbox_params_list[k]) + + # # print(gt_bbox_params_list) + # # print(gt_bbox_params.tolist()) + + # coor = pts_input[i][:,[1,2,0]] + # # coor[:,1] *= -1 + # Converter.compile("val_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + # bbox_params=gt_bbox_params_list) + + # exit() + # set_boxes.append(set_box) + # # set_boxes.append(generate_bboxes_from_pred(occupancy, position, size, angle, heading, + # # classification, params.anchor_dims, occ_threshold=0.1)) + # # confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) + + # sum_bboxes = 0 + # for h in range(len(set_boxes)): + # sum_bboxes += len(set_boxes[h]) + + # print('Batch ', str(batch_idx) ,': Box predictions with occupancy > occ_thr: ', sum_bboxes) + # print('Scene 1: Box predictions with occupancy > occ_thr: ', len(set_boxes[0])) + # exit() + # print(set_boxes[-1]) + + # # NMS + # nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5) + + # print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) + + # # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes + # gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=False) + # gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) + # for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): + # print("---------- New Scenario ---------- ") + # focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) + # print("---------- ------------ ---------- ") + # for gt in gt_label: + # print(gt) + # for pred in seq_boxes: + # print(pred) diff --git a/point_pillars_custom_prediction.py b/point_pillars_custom_prediction.py index 96cc9e6..43e1003 100644 --- a/point_pillars_custom_prediction.py +++ b/point_pillars_custom_prediction.py @@ -5,20 +5,20 @@ # from processors import SimpleDataGenerator # from custom_processors import CustomDataGenerator, AnalyseCustomDataGenerator from point_pillars_custom_processors_v2 import CustomDataGenerator, AnalyseCustomDataGenerator -from inference_utils import generate_bboxes_from_pred, GroundTruthGenerator, focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array +from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator +from inference_utils_v2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array from readers import KittiDataReader -from config import Parameters +from config_v2 import Parameters from network import build_point_pillar_graph 
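The limit_period helper defined in these scripts wraps an angle into a half-open window of width period, positioned by offset. A small standalone example of its behaviour, using only the one-line body shown above:

import numpy as np

def limit_period(val, offset=0.5, period=np.pi):
    # Maps val into [-offset * period, (1 - offset) * period).
    return val - np.floor(val / period + offset) * period

angles = np.array([-3.5, -np.pi, -0.2, 1.0, np.pi, 4.0])
print(limit_period(angles))                                 # wrapped into [-pi/2, pi/2)
print(limit_period(angles, offset=0.5, period=2 * np.pi))   # wrapped into [-pi, pi)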
from point_viz.converter import PointvizConverter DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" -# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" -MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_Input_Coordinate_Analysis_v2" +MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = "2" +os.environ["CUDA_VISIBLE_DEVICES"] = "0" def limit_period(val, offset=0.5, period=np.pi): @@ -27,22 +27,13 @@ def limit_period(val, offset=0.5, period=np.pi): if __name__ == "__main__": params = Parameters() - # pillar_net = build_point_pillar_graph(params) - # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) - # pillar_net.summary() - - # data_reader = KittiDataReader() - - # lidar_files = sorted(glob(os.path.join(DATA_ROOT, "velodyne", "*.bin"))) - # label_files = sorted(glob(os.path.join(DATA_ROOT, "label_2", "*.txt"))) - # calibration_files = sorted(glob(os.path.join(DATA_ROOT, "calib", "*.txt"))) - # assert len(lidar_files) == len(label_files) == len(calibration_files), "Input dirs require equal number of files." - # eval_gen = SimpleDataGenerator(data_reader, params.batch_size, lidar_files, label_files, calibration_files) - - + pillar_net = build_point_pillar_graph(params) + pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + pillar_net.summary() + exit() # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" - save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_input_coordinate_analysis_point_pillar_v2" + save_viz_path = os.path.join("/home/tan/tjtanaa/PointPillars/visualization", MODEL_ROOT.split('/')[-1]) # Initialize and setup output directory. 
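Throughout these visualisation scripts, boxes held as [x, y, z, l, w, h, ry] columns are reordered before being passed to PointvizConverter.compile, and point coordinates are passed as (y, z, x). A minimal sketch of that bookkeeping, assuming only the column convention visible in the code above; the colour string and the free-text label are simply the last two items appended to each row:

import numpy as np

# One box in the convention used by these scripts:
# columns 0..6 = x, y, z, l, w, h, ry (lidar frame, z at the box centre).
boxes = np.array([[10.0, 2.0, -0.8, 3.9, 1.6, 1.5, 0.3]], dtype=np.float32)

# Boxes are stacked the same way as gt_bbox_params above:
# (l, h, w, y, z, x, ry), i.e. columns [3, 5, 4, 1, 2, 0, 6].
bbox_params = boxes[:, [3, 5, 4, 1, 2, 0, 6]]

bbox_params_list = bbox_params.tolist()
for row in bbox_params_list:
    row.append("Green")                            # box colour understood by point_viz
    row.append("%.5f, %.5f" % (row[3], row[5]))    # free-text label (here: y and x)
print(bbox_params_list)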
Converter = PointvizConverter(save_viz_path) @@ -59,11 +50,13 @@ def limit_period(val, offset=0.5, period=np.pi): validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, npoints=20000, split='val',random_select=False, classes=list(params.classes_map.keys())) + # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + # npoints=20000, split='val',random_select=False, classes=list(params.classes_map.keys())) for batch_idx in range(0,20): [pillars, voxels], [occupancy_, position_, size_, angle_, heading_, classification_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] - # occupancy, position, size, angle, heading, classification = pillar_net.predict([pillars, voxels]) + occupancy, position, size, angle, heading, classification = pillar_net.predict([pillars, voxels]) # angle = limit_period(angle, offset=0.5, period=2*np.pi) @@ -76,13 +69,19 @@ def limit_period(val, offset=0.5, period=np.pi): set_boxes, confidences = [], [] loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 for i in range(loop_range): - # set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], - # heading[i], - # classification[i], params.anchor_dims, occ_threshold=0.15) + set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + heading[i], + classification[i], params.anchor_dims, occ_threshold=0.5) - gt_boxes3d_ = gt_boxes3d[i] - print(gt_boxes3d_.shape) + _, decoded_gt_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy_[i], position_[i], size_[i], angle_[i], + heading_[i], + classification_[i], params.anchor_dims, occ_threshold=0.4) + + # gt_boxes3d_ = gt_boxes3d[i] + gt_boxes3d_ = decoded_gt_boxes3d + + # print(gt_boxes3d_.shape) gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], gt_boxes3d_[:,1], gt_boxes3d_[:,2] , gt_boxes3d_[:,0], @@ -90,39 +89,40 @@ def limit_period(val, offset=0.5, period=np.pi): gt_bbox_params_list = gt_bbox_params.tolist() + # gt_bbox_params_list = [] for k in range(len(gt_bbox_params_list)): + msg = "%.5f, %s, %.5f"%(decoded_gt_boxes3d[k,9], params.map_classes[int(decoded_gt_boxes3d[k,8])], decoded_gt_boxes3d[k,6]) + # msg = "%.5f, %.5f"%(gt_bbox_params_list[k][3],gt_bbox_params_list[k][5]) gt_bbox_params_list[k].append("Green") - gt_bbox_params_list[k].append("1.0") - - # if len(set_box) > 0: - # predicted_boxes3d_ = predicted_boxes3d - # # bbox_params = validation_gen.convert_predictions_into_point_viz_format(predicted_boxes3d[:,[1, 2, 0, 5, 3, 4, 6 ]]) + # gt_bbox_params_list[k].append("1.0") + gt_bbox_params_list[k].append(msg) - # # print(predicted_boxes3d_.shape) - # # print(predicted_boxes3d_) - # # print(size[i]) + if len(set_box) > 0: + predicted_boxes3d_ = predicted_boxes3d + # bbox_params = validation_gen.convert_predictions_into_point_viz_format(predicted_boxes3d[:,[1, 2, 0, 5, 3, 4, 6 ]]) - # bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], - # predicted_boxes3d_[:,1], (predicted_boxes3d_[:,2] - predicted_boxes3d_[:,5] / 2) , - # predicted_boxes3d_[:,0], - # predicted_boxes3d_[:,6]], axis=1) + print("batch_idx: ", batch_idx * params.batch_size + i, " has ", predicted_boxes3d_.shape, "predictions") + # print(predicted_boxes3d_) + # print(size[i]) + bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], + predicted_boxes3d_[:,1], 
predicted_boxes3d_[:,2] , + predicted_boxes3d_[:,0], + predicted_boxes3d_[:,6]], axis=1) - # # bbox_params = np.stack([predicted_boxes3d[:,4], predicted_boxes3d[:,5], predicted_boxes3d[:,3], - # # predicted_boxes3d[:,1], -(predicted_boxes3d[:,2] - predicted_boxes3d[:,5] / 2), - # # predicted_boxes3d[:,0], - # # predicted_boxes3d[:,6]], axis=1) - # bbox_params_list = bbox_params.tolist() - # # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] - # for k in range(predicted_boxes3d.shape[0]): - # bbox_params_list[k].append("Magenta") - # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + params.map_classes[int(predicted_boxes3d[k,8])]) - # gt_bbox_params_list.append(bbox_params_list[k]) + bbox_params_list = bbox_params.tolist() + # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + for k in range(predicted_boxes3d.shape[0]): + msg = "%.5f, %s, %.5f"%(predicted_boxes3d[k,9],params.map_classes[int(predicted_boxes3d[k,8])], predicted_boxes3d[k,6]) + bbox_params_list[k].append("Magenta") + bbox_params_list[k].append(msg) + # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + "=" + params.map_classes[int(predicted_boxes3d[k,8])]) + gt_bbox_params_list.append(bbox_params_list[k]) coor = pts_input[i][:,[1,2,0]] # coor[:,1] *= -1 - Converter.compile("train_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + Converter.compile("val_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], bbox_params=gt_bbox_params_list) # set_boxes.append(set_box) diff --git a/point_pillars_custom_prediction_v2_2.py b/point_pillars_custom_prediction_v2_2.py new file mode 100644 index 0000000..ac942d3 --- /dev/null +++ b/point_pillars_custom_prediction_v2_2.py @@ -0,0 +1,165 @@ +import os +from glob import glob +import numpy as np +import tensorflow as tf +from point_pillars_custom_processors_v2_2 import CustomDataGenerator, AnalyseCustomDataGenerator +from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator +from inference_utils_v2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array +from readers import KittiDataReader +from config_v2_2 import Parameters +from network_v2_2 import build_point_pillar_graph +from datetime import datetime + +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" +MODEL_ROOT = "./logs_Car_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val_new_network" + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "3" + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +if __name__ == "__main__": + + params = Parameters() + pillar_net = build_point_pillar_graph(params) + pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + pillar_net.summary() + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + save_viz_path = os.path.join("/home/tan/tjtanaa/PointPillars/visualization", MODEL_ROOT.split('/')[-1]) + # Initialize and setup output directory. 
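In the prediction scripts, generate_bboxes_from_pred_and_np_array (defined in inference_utils_v2, which this patch does not show) turns the network's occupancy / position / size / angle / heading maps back into boxes by thresholding occupancy and inverting the anchor encoding. The sketch below only illustrates that idea; the anchor position handling and helper name are hypothetical, with anchors given as rows of (l, w, h, z, yaw) and the anchor taken at the centre of its downscaled grid cell:

import numpy as np

def decode_predictions(occupancy, position, size, angle, anchors,
                       x_step, y_step, x_min, y_min, downscale, occ_threshold=0.5):
    """Rough inverse of the anchor encoding; anchors rows are (l, w, h, z, yaw)."""
    boxes = []
    grid_x, grid_y, n_anchors = occupancy.shape
    for ix in range(grid_x):
        for iy in range(grid_y):
            for a in range(n_anchors):
                if occupancy[ix, iy, a] < occ_threshold:
                    continue
                al, aw, ah, az, ayaw = anchors[a]
                diag = np.sqrt(al ** 2 + aw ** 2)
                # hypothetical anchor position: centre of the (downscaled) grid cell
                ax = (ix + 0.5) * x_step * downscale + x_min
                ay = (iy + 0.5) * y_step * downscale + y_min
                x = ax + position[ix, iy, a, 0] * diag
                y = ay + position[ix, iy, a, 1] * diag
                z = az + position[ix, iy, a, 2] * ah
                l = al * np.exp(size[ix, iy, a, 0])
                w = aw * np.exp(size[ix, iy, a, 1])
                h = ah * np.exp(size[ix, iy, a, 2])
                yaw = ayaw + angle[ix, iy, a]   # raw residual; this patch removed the sin() encoding
                boxes.append([x, y, z, l, w, h, yaw, occupancy[ix, iy, a]])
    return np.array(boxes, dtype=np.float32)

# Toy usage with random maps on a 4x4 grid and two anchors:
occ = np.random.rand(4, 4, 2).astype(np.float32)
pos = (np.random.randn(4, 4, 2, 3) * 0.1).astype(np.float32)
siz = (np.random.randn(4, 4, 2, 3) * 0.1).astype(np.float32)
ang = (np.random.randn(4, 4, 2) * 0.1).astype(np.float32)
anchors = np.array([[3.9, 1.6, 1.56, -1.0, 0.0],
                    [1.6, 3.9, 1.56, -1.0, 1.57]], dtype=np.float32)
print(decode_predictions(occ, pos, siz, ang, anchors,
                         x_step=0.16, y_step=0.16, x_min=0.0, y_min=-40.0, downscale=2).shape)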
+ Converter = PointvizConverter(save_viz_path) + + + + + + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + + # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, + # npoints=20000, split='train', classes=list(params.classes_map.keys()), + # random_select=True, gt_database_dir=None, aug_hard_ratio=0.7) + + validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + npoints=20000, split='train_val_test',random_select=False, classes=list(params.classes_map.keys())) + # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + # npoints=20000, split='val',random_select=False, classes=list(params.classes_map.keys())) + + inference_duration = [] + + for batch_idx in range(0,70): + [pillars, voxels], [occupancy_, position_, size_, angle_, heading_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] + + + start=datetime.now() + + occupancy, position, size, angle, heading = pillar_net.predict([pillars, voxels]) + + inference_duration.append( datetime.now()-start) + + # angle = limit_period(angle, offset=0.5, period=2*np.pi) + + classification = np.zeros(shape=np.array(occupancy).shape) + classification_ = classification + # occupancy[:,:,:,:2] = 0 + + # print(occupancy.shape) + # exit() + + set_boxes, confidences = [], [] + loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 + for i in range(loop_range): + set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + heading[i], + classification[i], params.anchor_dims, occ_threshold=0.5) + + + _, decoded_gt_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy_[i], position_[i], size_[i], angle_[i], + heading_[i], + classification_[i], params.anchor_dims, occ_threshold=0.4) + + # gt_boxes3d_ = gt_boxes3d[i] + gt_boxes3d_ = decoded_gt_boxes3d + + print(gt_boxes3d_.shape) + if(len(gt_boxes3d_) == 0): + gt_bbox_params_list = [] + else: + gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], + gt_boxes3d_[:,1], gt_boxes3d_[:,2] , + gt_boxes3d_[:,0], + gt_boxes3d_[:,6]], axis=1) + + + gt_bbox_params_list = gt_bbox_params.tolist() + # gt_bbox_params_list = [] + for k in range(len(gt_bbox_params_list)): + msg = "%.5f, %s, %.5f"%(decoded_gt_boxes3d[k,9], params.map_classes[int(decoded_gt_boxes3d[k,8])], decoded_gt_boxes3d[k,6]) + # msg = "%.5f, %.5f"%(gt_bbox_params_list[k][3],gt_bbox_params_list[k][5]) + gt_bbox_params_list[k].append("Green") + # gt_bbox_params_list[k].append("1.0") + gt_bbox_params_list[k].append(msg) + + if len(set_box) > 0: + predicted_boxes3d_ = predicted_boxes3d + # bbox_params = validation_gen.convert_predictions_into_point_viz_format(predicted_boxes3d[:,[1, 2, 0, 5, 3, 4, 6 ]]) + + print("batch_idx: ", batch_idx * params.batch_size + i, " has ", predicted_boxes3d_.shape, "predictions") + # print(predicted_boxes3d_) + # print(size[i]) + + bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], + predicted_boxes3d_[:,1], predicted_boxes3d_[:,2] , + predicted_boxes3d_[:,0], + predicted_boxes3d_[:,6]], axis=1) + + + bbox_params_list = bbox_params.tolist() + # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + for k in range(predicted_boxes3d.shape[0]): + msg = "%.5f, %s, %.5f"%(predicted_boxes3d[k,9],params.map_classes[int(predicted_boxes3d[k,8])], predicted_boxes3d[k,6]) + bbox_params_list[k].append("Magenta") 
+ bbox_params_list[k].append(msg) + # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + "=" + params.map_classes[int(predicted_boxes3d[k,8])]) + gt_bbox_params_list.append(bbox_params_list[k]) + + coor = pts_input[i][:,[1,2,0]] + # coor[:,1] *= -1 + Converter.compile("val_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + bbox_params=gt_bbox_params_list) + print("Average runtime speed: ", np.mean(inference_duration[20:])) + # set_boxes.append(set_box) + # # set_boxes.append(generate_bboxes_from_pred(occupancy, position, size, angle, heading, + # # classification, params.anchor_dims, occ_threshold=0.1)) + # # confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) + + # sum_bboxes = 0 + # for h in range(len(set_boxes)): + # sum_bboxes += len(set_boxes[h]) + + # print('Batch ', str(batch_idx) ,': Box predictions with occupancy > occ_thr: ', sum_bboxes) + # print('Scene 1: Box predictions with occupancy > occ_thr: ', len(set_boxes[0])) + # exit() + # print(set_boxes[-1]) + + # # NMS + # nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5) + + # print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) + + # # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes + # gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=False) + # gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) + # for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): + # print("---------- New Scenario ---------- ") + # focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) + # print("---------- ------------ ---------- ") + # for gt in gt_label: + # print(gt) + # for pred in seq_boxes: + # print(pred) diff --git a/point_pillars_custom_processors_v2.py b/point_pillars_custom_processors_v2.py index 3c825c4..ae2b17c 100644 --- a/point_pillars_custom_processors_v2.py +++ b/point_pillars_custom_processors_v2.py @@ -79,7 +79,7 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): a = int(self.anchor_dims.shape[0]) return np.zeros((pX, pY, a), dtype='float32'), np.zeros((pX, pY, a, self.nb_dims), dtype='float32'), \ np.zeros((pX, pY, a, self.nb_dims), dtype='float32'), np.zeros((pX, pY, a), dtype='float32'), \ - np.zeros((pX, pY, a, self.nb_classes), dtype='float64') + np.zeros((pX, pY, a), dtype='float32'), np.zeros((pX, pY, a, self.nb_classes), dtype='float64') # For each label file, generate these properties except for the Don't care class @@ -127,8 +127,11 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): # return a merged target view for all objects in the ground truth and get categorical labels # print("target.shape: ", target.shape) sel = select_best_anchors(target) + # print("self.shape: ", sel[...,0].shape) ohe = tf.keras.utils.to_categorical(sel[..., 9], num_classes=self.nb_classes, dtype='float64') # print("self.shape: ", sel[...,0].shape) + # print("ohe.shape: ", ohe.shape) + # print("sel[8].shape: ",sel[..., 8].shape) return sel[..., 0], sel[..., 1:4], sel[..., 4:7], sel[..., 7], sel[..., 8], ohe @@ -149,7 +152,8 @@ def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str # self.data_reader = data_reader self.batch_size = batch_size self.sample_id_list=self.get_sample_id_list() - self.split = split + # self.split = split + # print("CustomDataGenerator: " ,self.split) def get_sample(self, index): 
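The hunk above also shows how make_ground_truth unpacks the merged target: select_best_anchors keeps, for every grid cell and anchor, the object with the highest occupancy, and the last axis is then sliced into the individual training targets. A toy illustration of those shapes, assuming the 10-channel layout written by createPillarsTarget (occupancy, three centre offsets, three log size ratios, yaw residual, heading bit, class id):

import numpy as np
import tensorflow as tf

# Toy target tensor: (n_objects, grid_x, grid_y, n_anchors, 10)
target = np.random.rand(3, 4, 4, 2, 10).astype(np.float32)
target[..., 9] = np.random.randint(0, 2, size=target.shape[:-1])   # class ids

def select_best_anchors(arr):
    dims = np.indices(arr.shape[1:])
    # keep, for every (x, y, anchor, channel), the object with the highest occupancy
    ind = (np.argmax(arr[..., 0:1], axis=0),) + tuple(dims)
    return arr[ind]

sel = select_best_anchors(target)            # (4, 4, 2, 10)
occupancy = sel[..., 0]
position  = sel[..., 1:4]
size      = sel[..., 4:7]
angle     = sel[..., 7]
heading   = sel[..., 8]
ohe = tf.keras.utils.to_categorical(sel[..., 9], num_classes=2, dtype='float64')
print(occupancy.shape, position.shape, size.shape, angle.shape, heading.shape, ohe.shape)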
@@ -219,13 +223,15 @@ def __getitem__(self, batch_id: int): angle = np.array(angle) heading = np.array(heading) classification = np.array(classification) - return [pillars, voxels], [occupancy, position, size, angle, heading, classification] + # return [pillars, voxels], [occupancy, position, size, angle, heading, classification] # network + return [pillars, voxels], [occupancy, position, size, angle, heading] # network_v2 else: return [pillars, voxels] def on_epoch_end(self): # print("inside epoch") if self.split=='train' or self.split =='val': + # pass self.sample_id_list=shuffle(self.sample_id_list) @@ -245,19 +251,15 @@ def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str # self.data_reader = data_reader self.batch_size = batch_size self.sample_id_list=self.get_sample_id_list() - self.split = split + # self.split = split + # self.split = split + # print("AnalyseCustomDataGenerator: " ,self.split) - def get_sample(self, index): + def _get_sample(self, index): return super().get_sample(index) - - # def convert_labels_into_point_viz_format(self, gt_boxes3d): - # gt_boxes3d = gt_boxes3d[:,[3,4,5,0,1,2, 6]] # [xyz,3l4w5h,ry] => [3l,5h,4w] - # gt_boxes3d[:,5] -= (gt_boxes3d[:,2] /2) - # return gt_boxes3d - def __len__(self): return len(self.sample_id_list) // self.batch_size @@ -284,7 +286,7 @@ def __getitem__(self, batch_id: int): for i in file_ids: # print(i) # print(type(i)) - sample = self.get_sample(i) + sample = self._get_sample(i) # For each file, dividing the space into a x-y grid to create pillars pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) # print(pts_lidar.shape) @@ -320,7 +322,7 @@ def __getitem__(self, batch_id: int): # Voxels are the pillar ids pillars_, voxels_ = self.make_point_pillars(pts_input) - print(pillars_.shape, voxels_.shape) + # print(pillars_.shape, voxels_.shape) # for i in range(10): # print(pillars_[0,0,i,:]) # print(np.sum(pillars_ > 0)) @@ -330,10 +332,15 @@ def __getitem__(self, batch_id: int): voxels.append(voxels_) # print(sample['gt_cls_type_list']) - + # print("split: ", self.split) if self.split=='train' or self.split =='val': + # print(len(gt_boxes3d), ", ", len(sample['gt_cls_type_list'])) + if (len(gt_boxes3d) == 0): + print("file id: ", i, " has zero gt label") occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( gt_boxes3d, sample['gt_cls_type_list']) + # print(len(a)) + # if # print(occupancy_.shape, position_.shape, size_.shape, angle_.shape, heading_.shape, classification_.shape) @@ -361,7 +368,8 @@ def __getitem__(self, batch_id: int): angle = np.array(angle) heading = np.array(heading) classification = np.array(classification) - return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input_, gt_boxes3d_, sample_] + # return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input_, gt_boxes3d_, sample_] + return [pillars, voxels], [occupancy, position, size, angle, heading], [pts_input_, gt_boxes3d_, sample_] # new network else: return [pillars, voxels] diff --git a/point_pillars_custom_processors_v2_2.py b/point_pillars_custom_processors_v2_2.py new file mode 100644 index 0000000..ae2b17c --- /dev/null +++ b/point_pillars_custom_processors_v2_2.py @@ -0,0 +1,380 @@ +from typing import List, Any +import numpy as np +import tensorflow as tf + +from tensorflow.python.keras.utils.data_utils import Sequence + +from config_v2 import Parameters +# from point_pillars import createPillars, 
createPillarsTarget +from point_pillars_v2 import createPillars, createPillarsTarget +# from readers import DataReader, Label3D +from sklearn.utils import shuffle +import sys + +from det3d.pc_kitti_dataset import PCKittiAugmentedDataset + +from point_viz.converter import PointvizConverter + + +def select_best_anchors(arr): + dims = np.indices(arr.shape[1:]) + # arr[..., 0:1] gets the occupancy value from occ in {-1, 0, 1}, i.e. {bad match, neg box, pos box} + ind = (np.argmax(arr[..., 0:1], axis=0),) + tuple(dims) + + return arr[ind] + + +class DataProcessor(Parameters): + + def __init__(self, **kwargs): + super(DataProcessor, self).__init__(**kwargs) + anchor_dims = np.array(self.anchor_dims, dtype=np.float32) + self.anchor_dims = anchor_dims[:, 0:3] + self.anchor_z = anchor_dims[:, 3] + self.anchor_yaw = anchor_dims[:, 4] + # Counts may be used to make statistic about how well the anchor boxes fit the objects + self.pos_cnt, self.neg_cnt = 0, 0 + + def make_point_pillars(self, points: np.ndarray): + + assert points.ndim == 2 + assert points.shape[1] == 4 + assert points.dtype == np.float32 + + pillars, indices = createPillars(points, + self.max_points_per_pillar, + self.max_pillars, + self.x_step, + self.y_step, + self.x_min, + self.x_max, + self.y_min, + self.y_max, + self.z_min, + self.z_max, + False) + + return pillars, indices + + def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): + """ Generate the ground truth label for each pillars + + Args: + gt_boxes_3d (numpy[float]): A list of floats containing [x, y, z, h, w, l, ry] + gt_cls_type_list (List[str]): A list of floats containing [cls_type] + + Returns: + [type]: [description] + """ + + # filter labels by classes (cars, pedestrians and Trams) + # Label has 4 properties (Classification (0th index of labels file), + # centroid coordinates, dimensions, yaw) + # labels = list(filter(lambda x: x.classification in self.classes, labels)) + + + + if len(gt_boxes_3d) == 0: + pX, pY = int(self.Xn / self.downscaling_factor), int(self.Yn / self.downscaling_factor) + a = int(self.anchor_dims.shape[0]) + return np.zeros((pX, pY, a), dtype='float32'), np.zeros((pX, pY, a, self.nb_dims), dtype='float32'), \ + np.zeros((pX, pY, a, self.nb_dims), dtype='float32'), np.zeros((pX, pY, a), dtype='float32'), \ + np.zeros((pX, pY, a), dtype='float32'), np.zeros((pX, pY, a, self.nb_classes), dtype='float64') + + # For each label file, generate these properties except for the Don't care class + + # target_positions = np.array([label.centroid for label in labels], dtype=np.float32) + # target_dimension = np.array([label.dimension for label in labels], dtype=np.float32) + # target_yaw = np.array([label.yaw for label in labels], dtype=np.float32) + # target_class = np.array([self.classes[label.classification] for label in labels], dtype=np.int32) + + target_positions = gt_boxes_3d[:,:3] + target_dimension = gt_boxes_3d[:,3:6] # don't have to translate again + target_yaw = gt_boxes_3d[:, 6] + # print(type(self.classes)) + # print(type(self.classes_map)) + # # print(gt_cls_type_list[0]) + # print(self.classes_map[gt_cls_type_list[0]]) + + target_class = np.array([self.classes_map[gt_cls_type_list[k]] for k in range(len(gt_cls_type_list))], dtype=np.int32) + + assert np.all(target_yaw >= -np.pi) & np.all(target_yaw <= np.pi) + assert len(target_positions) == len(target_dimension) == len(target_yaw) == len(target_class) + + target, pos, neg = createPillarsTarget(target_positions, + target_dimension, + target_yaw, + target_class, + 
self.anchor_dims, + self.anchor_z, + self.anchor_yaw, + self.positive_iou_threshold, + self.negative_iou_threshold, + self.nb_classes, + self.downscaling_factor, + self.x_step, + self.y_step, + self.x_min, + self.x_max, + self.y_min, + self.y_max, + self.z_min, + self.z_max, + False) + self.pos_cnt += pos + self.neg_cnt += neg + + # return a merged target view for all objects in the ground truth and get categorical labels + # print("target.shape: ", target.shape) + sel = select_best_anchors(target) + # print("self.shape: ", sel[...,0].shape) + ohe = tf.keras.utils.to_categorical(sel[..., 9], num_classes=self.nb_classes, dtype='float64') + # print("self.shape: ", sel[...,0].shape) + # print("ohe.shape: ", ohe.shape) + # print("sel[8].shape: ",sel[..., 8].shape) + + return sel[..., 0], sel[..., 1:4], sel[..., 4:7], sel[..., 7], sel[..., 8], ohe + + +class CustomDataGenerator(DataProcessor, Sequence, PCKittiAugmentedDataset): + """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ + + def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str ='train', + classes:List[str] =['Car', 'Pedestrian', 'Person_sitting'], random_select:bool =True, + gt_database_dir=None, aug_hard_ratio:float=0.5, **kwargs): + + super(CustomDataGenerator, self).__init__( + batch_size=batch_size, root_dir=root_dir, + npoints=npoints, split=split, classes=classes, + random_select=random_select, gt_database_dir=gt_database_dir, + aug_hard_ratio=aug_hard_ratio, **kwargs + ) + # self.data_reader = data_reader + self.batch_size = batch_size + self.sample_id_list=self.get_sample_id_list() + # self.split = split + # print("CustomDataGenerator: " ,self.split) + + + def get_sample(self, index): + return super().get_sample(index) + + + def __len__(self): + return len(self.sample_id_list) // self.batch_size + + def __getitem__(self, batch_id: int): + file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) + # print("inside getitem") + pillars = [] + voxels = [] + occupancy = [] + position = [] + size = [] + angle = [] + heading = [] + classification = [] + + + + for i in file_ids: + sample = self.get_sample(i) + # For each file, dividing the space into a x-y grid to create pillars + pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) + pts_input = np.concatenate((pts_lidar, sample['pts_features']), axis=1) # (N, C) + + gt_boxes3d_xyz = sample['calib'].rect_to_lidar(sample['gt_boxes3d'][:,:3]) + + gt_boxes3d = np.concatenate(( + gt_boxes3d_xyz[:,0,np.newaxis], # 0 x + gt_boxes3d_xyz[:,1,np.newaxis], # 1 y + gt_boxes3d_xyz[:,2,np.newaxis] + sample['gt_boxes3d'][:,3,np.newaxis] / 2, # 2 z + sample['gt_boxes3d'][:,5,np.newaxis], # 3 l # same as the original label + sample['gt_boxes3d'][:,4,np.newaxis], # 4 w # same as the original label + sample['gt_boxes3d'][:,3,np.newaxis], # 5 h # same as the original label + -sample['gt_boxes3d'][:,6,np.newaxis], # 6 ry + ), axis=1) + + # Voxels are the pillar ids + pillars_, voxels_ = self.make_point_pillars(pts_input) + + pillars.append(pillars_) + voxels.append(voxels_) + + + if self.split=='train' or self.split =='val': + occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( + gt_boxes3d, sample['gt_cls_type_list']) + + occupancy.append(occupancy_) + position.append(position_) + size.append(size_) + angle.append(angle_) + heading.append(heading_) + classification.append(classification_) + + pillars = np.concatenate(pillars, axis=0) + voxels = 
np.concatenate(voxels, axis=0) + + if self.split=='train' or self.split =='val': + occupancy = np.array(occupancy) + position = np.array(position) + size = np.array(size) + angle = np.array(angle) + heading = np.array(heading) + classification = np.array(classification) + # return [pillars, voxels], [occupancy, position, size, angle, heading, classification] # network + return [pillars, voxels], [occupancy, position, size, angle, heading] # network_v2 + else: + return [pillars, voxels] + + def on_epoch_end(self): + # print("inside epoch") + if self.split=='train' or self.split =='val': + # pass + self.sample_id_list=shuffle(self.sample_id_list) + + +class AnalyseCustomDataGenerator(CustomDataGenerator): + """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ + + def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str ='train', + classes:List[str] =['Car', 'Pedestrian', 'Person_sitting'], random_select:bool =True, + gt_database_dir=None, aug_hard_ratio:float=0.5, **kwargs): + + super(AnalyseCustomDataGenerator, self).__init__( + batch_size=batch_size, root_dir=root_dir, + npoints=npoints, split=split, classes=classes, + random_select=random_select, gt_database_dir=gt_database_dir, + aug_hard_ratio=aug_hard_ratio, **kwargs + ) + # self.data_reader = data_reader + self.batch_size = batch_size + self.sample_id_list=self.get_sample_id_list() + # self.split = split + # self.split = split + # print("AnalyseCustomDataGenerator: " ,self.split) + + + def _get_sample(self, index): + return super().get_sample(index) + + + def __len__(self): + return len(self.sample_id_list) // self.batch_size + + def __getitem__(self, batch_id: int): + file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) + # print("inside getitem") + pillars = [] + voxels = [] + occupancy = [] + position = [] + size = [] + angle = [] + heading = [] + classification = [] + + pts_input_ = [] + gt_boxes3d_ = [] + sample_ = [] + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" + # # Initialize and setup output directory. 
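CustomDataGenerator and AnalyseCustomDataGenerator follow the standard Keras Sequence contract, which is what lets model.fit consume them with use_multiprocessing and several workers. A stripped-down sketch of that contract with dummy tensors (the shapes are placeholders, not the real pillar dimensions):

import numpy as np
import tensorflow as tf

class ToyPillarSequence(tf.keras.utils.Sequence):
    """Bare-bones version of the generator contract used above: Keras only needs
    __len__, __getitem__ (one full batch per call) and, optionally, on_epoch_end."""

    def __init__(self, n_samples=32, batch_size=4):
        self.batch_size = batch_size
        self.sample_id_list = list(range(n_samples))

    def __len__(self):
        # number of batches per epoch (drops the last partial batch, as above)
        return len(self.sample_id_list) // self.batch_size

    def __getitem__(self, batch_id):
        ids = self.sample_id_list[batch_id * self.batch_size:(batch_id + 1) * self.batch_size]
        pillars = np.random.rand(self.batch_size, 100, 32, 9).astype(np.float32)
        indices = np.random.randint(0, 10, size=(self.batch_size, 100, 3)).astype(np.int32)
        occupancy = np.zeros((self.batch_size, 50, 50, 4), dtype=np.float32)
        return [pillars, indices], [occupancy]

    def on_epoch_end(self):
        np.random.shuffle(self.sample_id_list)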
+ # Converter = PointvizConverter(save_viz_path) + + for i in file_ids: + # print(i) + # print(type(i)) + sample = self._get_sample(i) + # For each file, dividing the space into a x-y grid to create pillars + pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) + # print(pts_lidar.shape) + + pts_input = np.concatenate((pts_lidar, sample['pts_features']), axis=1) # (N, C) + + gt_boxes3d_xyz = sample['calib'].rect_to_lidar(sample['gt_boxes3d'][:,:3]) + + # print(gt_boxes3d_xyz.shape) + + gt_boxes3d = np.concatenate(( + gt_boxes3d_xyz[:,0,np.newaxis], # 0 x + gt_boxes3d_xyz[:,1,np.newaxis], # 1 y + gt_boxes3d_xyz[:,2,np.newaxis] + sample['gt_boxes3d'][:,3,np.newaxis] / 2, # 2 z + sample['gt_boxes3d'][:,5,np.newaxis], # 3 l # same as the original label + sample['gt_boxes3d'][:,4,np.newaxis], # 4 w # same as the original label + sample['gt_boxes3d'][:,3,np.newaxis], # 5 h # same as the original label + -sample['gt_boxes3d'][:,6,np.newaxis], # 6 ry + ), axis=1) + + # print(type(gt_boxes3d)) + # gt_boxes3d = self.limit_yaw(gt_boxes3d) + + # bbox_params = self.convert_labels_into_point_viz_format(gt_boxes3d) + # print(bbox_params.shape) + # Converter.compile("custom_sample_{}".format(i), coors=pts_input[:,:3], intensity=pts_input[:,3], + # bbox_params=bbox_params) + + + # exit() + + # print(pts_input.shape) + # Voxels are the pillar ids + pillars_, voxels_ = self.make_point_pillars(pts_input) + + # print(pillars_.shape, voxels_.shape) + # for i in range(10): + # print(pillars_[0,0,i,:]) + # print(np.sum(pillars_ > 0)) + # exit() + + pillars.append(pillars_) + voxels.append(voxels_) + + # print(sample['gt_cls_type_list']) + # print("split: ", self.split) + if self.split=='train' or self.split =='val': + # print(len(gt_boxes3d), ", ", len(sample['gt_cls_type_list'])) + if (len(gt_boxes3d) == 0): + print("file id: ", i, " has zero gt label") + occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( + gt_boxes3d, sample['gt_cls_type_list']) + # print(len(a)) + # if + + # print(occupancy_.shape, position_.shape, size_.shape, angle_.shape, heading_.shape, classification_.shape) + + + occupancy.append(occupancy_) + position.append(position_) + size.append(size_) + angle.append(angle_) + heading.append(heading_) + classification.append(classification_) + + sample_.append(sample) + gt_boxes3d_.append(gt_boxes3d) + pts_input_.append(pts_input) + + # exit() + + pillars = np.concatenate(pillars, axis=0) + voxels = np.concatenate(voxels, axis=0) + + if self.split=='train' or self.split =='val': + occupancy = np.array(occupancy) + position = np.array(position) + size = np.array(size) + angle = np.array(angle) + heading = np.array(heading) + classification = np.array(classification) + # return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input_, gt_boxes3d_, sample_] + return [pillars, voxels], [occupancy, position, size, angle, heading], [pts_input_, gt_boxes3d_, sample_] # new network + else: + return [pillars, voxels] + + def on_epoch_end(self): + # print("inside epoch") + if self.split=='train' or self.split =='val': + self.sample_id_list=shuffle(self.sample_id_list) + \ No newline at end of file diff --git a/point_pillars_training_custom_run_v2.py b/point_pillars_training_custom_run_v2.py index 196003d..1293f32 100644 --- a/point_pillars_training_custom_run_v2.py +++ b/point_pillars_training_custom_run_v2.py @@ -20,10 +20,11 @@ # DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" # TODO make main arg 
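The __getitem__ above rebuilds each ground-truth box in the lidar frame: the rect-frame centroid goes through calib.rect_to_lidar, z is moved from the box bottom to the box centre by adding half the height, the KITTI (h, w, l) columns are reordered to (l, w, h), and the rotation sign is flipped. A standalone sketch of the same bookkeeping; rect_to_lidar_stub is a hypothetical stand-in for the real calibration object and only applies the nominal axis permutation, with no calibration offsets:

import numpy as np

def rect_to_lidar_stub(xyz_rect):
    """Placeholder for sample['calib'].rect_to_lidar; the real KittiCalibration
    applies the inverse rectification and camera-to-velodyne transform."""
    # rect (x right, y down, z forward) -> lidar (x forward, y left, z up)
    x_r, y_r, z_r = xyz_rect[:, 0], xyz_rect[:, 1], xyz_rect[:, 2]
    return np.stack([z_r, -x_r, -y_r], axis=1)

# KITTI label rows in the rect frame: x, y, z, h, w, l, ry
gt_boxes3d_cam = np.array([[1.8, 1.6, 12.0, 1.5, 1.6, 3.9, 0.3]], dtype=np.float32)

xyz_lidar = rect_to_lidar_stub(gt_boxes3d_cam[:, :3])
gt_boxes3d = np.concatenate((
    xyz_lidar[:, 0, np.newaxis],                                          # x
    xyz_lidar[:, 1, np.newaxis],                                          # y
    xyz_lidar[:, 2, np.newaxis] + gt_boxes3d_cam[:, 3, np.newaxis] / 2,   # z: bottom -> centre
    gt_boxes3d_cam[:, 5, np.newaxis],                                     # l
    gt_boxes3d_cam[:, 4, np.newaxis],                                     # w
    gt_boxes3d_cam[:, 3, np.newaxis],                                     # h
    -gt_boxes3d_cam[:, 6, np.newaxis],                                    # ry sign flipped for the lidar frame
), axis=1)
print(gt_boxes3d)   # -> x, y, z(centre), l, w, h, ry in the lidar frame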
DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" # TODO make main arg # MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" -MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" +# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" +MODEL_ROOT = "./logs_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = "2" +os.environ["CUDA_VISIBLE_DEVICES"] = "1" if __name__ == "__main__": @@ -42,7 +43,7 @@ gt_database_dir = None training_gen = CustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, - npoints=20000, split='train', classes=list(params.classes_map.keys()), + npoints=20000, split='train_val', classes=list(params.classes_map.keys()), random_select=True, gt_database_dir=None, aug_hard_ratio=0.7) # validation_gen = CustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, diff --git a/point_pillars_training_custom_run_v2_2.py b/point_pillars_training_custom_run_v2_2.py new file mode 100644 index 0000000..c7b4206 --- /dev/null +++ b/point_pillars_training_custom_run_v2_2.py @@ -0,0 +1,87 @@ +import os +import time +import numpy as np +import tensorflow as tf +from glob import glob + +# from config import Parameters +from config_v2_2 import Parameters +from loss_v2_2 import PointPillarNetworkLoss +from network_v2_2 import build_point_pillar_graph +from point_pillars_custom_processors_v2_2 import CustomDataGenerator +from readers import KittiDataReader + +# from point_viz.converter import PointvizConverter + +tf.get_logger().setLevel("ERROR") + +# DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" # TODO make main arg +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" # TODO make main arg +# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" +# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" +MODEL_ROOT = "./logs_Car_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val_new_network" + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "1" + +if __name__ == "__main__": + params = Parameters() + + pillar_net = build_point_pillar_graph(params) + # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + pillar_net.summary() + # exit() + loss = PointPillarNetworkLoss(params) + + optimizer = tf.keras.optimizers.Adam(lr=params.learning_rate, decay=params.decay_rate) + + pillar_net.compile(optimizer, loss=loss.losses()) + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + # gt_database_dir = None + + training_gen = CustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, + npoints=20000, split='train_val', classes=list(params.classes_map.keys()), + random_select=True, gt_database_dir=gt_database_dir, aug_hard_ratio=0.7) + + validation_gen = CustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + npoints=20000, split='train_val_test', classes=list(params.classes_map.keys())) + + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" + # Initialize and setup output directory. 
+ # Converter = PointvizConverter(save_viz_path) + + + + # bbox_params = self.convert_labels_into_point_viz_format(gt_boxes3d) + # print(bbox_params.shape) + # Converter.compile("custom_sample_{}".format(i), coors=pts_input[:,:3], intensity=pts_input[:,3], + # bbox_params=bbox_params) + + + log_dir = MODEL_ROOT + epoch_to_decay = int( + np.round(params.iters_to_decay / params.batch_size * int(len(training_gen)))) + callbacks = [ + tf.keras.callbacks.TensorBoard(log_dir=log_dir), + tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(log_dir, "model.h5"), + monitor='val_loss', save_best_only=True), + tf.keras.callbacks.LearningRateScheduler( + lambda epoch, lr: lr * 0.8 if ((epoch % epoch_to_decay == 0) and (epoch != 0)) else lr, verbose=True), + # tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_loss'), + ] + + try: + pillar_net.fit(training_gen, + validation_data = validation_gen, + steps_per_epoch=len(training_gen), + callbacks=callbacks, + use_multiprocessing=True, + # max_queue_size = 16, + epochs=int(params.total_training_epochs), + workers=6) + except KeyboardInterrupt: + model_str = "interrupted_%s.h5" % time.strftime("%Y%m%d-%H%M%S") + pillar_net.save(os.path.join(log_dir, model_str)) + print("Interrupt. Saving output to %s" % os.path.join(os.getcwd(), log_dir[1:], model_str)) diff --git a/point_pillars_visualize_input.py b/point_pillars_visualize_input.py index 91ce836..090c3f5 100644 --- a/point_pillars_visualize_input.py +++ b/point_pillars_visualize_input.py @@ -31,14 +31,14 @@ def limit_period(val, offset=0.5, period=np.pi): # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_gt_only" # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_labels_only" - save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/input_coordinate_analysis_point_pillar_v2_gt_and_labels" + save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/pedestrian_input_coordinate_analysis_point_pillar_v2_gt_and_labels" # Initialize and setup output directory. 
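The training scripts decay the learning rate in steps: epoch_to_decay is derived from params.iters_to_decay, and the LearningRateScheduler multiplies the current rate by 0.8 whenever a non-zero multiple of that epoch count is reached. A small sketch of the resulting schedule, using illustrative numbers only:

initial_lr = 2e-4
epoch_to_decay = 15   # stands in for int(np.round(iters_to_decay / batch_size * len(training_gen)))

def scheduled_lr(epoch, lr):
    # Same rule as the LearningRateScheduler lambda above.
    return lr * 0.8 if (epoch % epoch_to_decay == 0 and epoch != 0) else lr

lr = initial_lr
for epoch in range(46):
    lr = scheduled_lr(epoch, lr)
    if epoch % 15 == 0:
        print(epoch, lr)   # 0: 2.0e-4, 15: 1.6e-4, 30: 1.28e-4, 45: 1.024e-4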
Converter = PointvizConverter(save_viz_path) gt_database_dir = os.path.join(DATA_ROOT, "gt_database") validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, - npoints=16384, split='val',random_select=False, classes=list(params.classes_map.keys())) + npoints=16384, split='train_val_test',random_select=False, classes=list(params.classes_map.keys())) for batch_idx in range(0,20): [pillars, voxels], [occupancy_, position_, size_, angle_, heading_, classification_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] diff --git a/src/point_pillars.cpp b/src/point_pillars.cpp index 6451f6c..e30d087 100644 --- a/src/point_pillars.cpp +++ b/src/point_pillars.cpp @@ -27,6 +27,8 @@ struct PillarPoint { float xc; float yc; float zc; + float xp; + float yp; }; pybind11::tuple createPillars(pybind11::array_t points, @@ -64,13 +66,15 @@ pybind11::tuple createPillars(pybind11::array_t points, auto yIndex = static_cast(std::floor((points.at(i, 1) - yMin) / yStep)); PillarPoint p = { - points.at(i, 0), - points.at(i, 1), - points.at(i, 2), - points.at(i, 3), - 0, - 0, - 0, + points.at(i, 0), // x + points.at(i, 1), // y + points.at(i, 2), // z + points.at(i, 3), // intensity + 0, // xc + 0, // yc + 0, // zc + 0, // xp + 0, // yp }; map[{xIndex, yIndex}].emplace_back(p); @@ -79,7 +83,7 @@ pybind11::tuple createPillars(pybind11::array_t points, pybind11::array_t tensor; pybind11::array_t indices; - tensor.resize({1, maxPillars, maxPointsPerPillar, 7}); + tensor.resize({1, maxPillars, maxPointsPerPillar, 9}); indices.resize({1, maxPillars, 3}); int pillarId = 0; @@ -124,13 +128,23 @@ pybind11::tuple createPillars(pybind11::array_t points, break; } - tensor.mutable_at(0, pillarId, pointId, 0) = p.x - (xIndex * xStep + xMin); - tensor.mutable_at(0, pillarId, pointId, 1) = p.y - (yIndex * yStep + yMin); - tensor.mutable_at(0, pillarId, pointId, 2) = p.z - zMid; + // tensor.mutable_at(0, pillarId, pointId, 0) = p.x - (xIndex * xStep + xMin); + // tensor.mutable_at(0, pillarId, pointId, 1) = p.y - (yIndex * yStep + yMin); + // tensor.mutable_at(0, pillarId, pointId, 2) = p.z - zMid; + // tensor.mutable_at(0, pillarId, pointId, 3) = p.intensity; + // tensor.mutable_at(0, pillarId, pointId, 4) = p.xc; + // tensor.mutable_at(0, pillarId, pointId, 5) = p.yc; + // tensor.mutable_at(0, pillarId, pointId, 6) = p.zc; + + tensor.mutable_at(0, pillarId, pointId, 0) = p.x; + tensor.mutable_at(0, pillarId, pointId, 1) = p.y; + tensor.mutable_at(0, pillarId, pointId, 2) = p.z; tensor.mutable_at(0, pillarId, pointId, 3) = p.intensity; tensor.mutable_at(0, pillarId, pointId, 4) = p.xc; tensor.mutable_at(0, pillarId, pointId, 5) = p.yc; tensor.mutable_at(0, pillarId, pointId, 6) = p.zc; + tensor.mutable_at(0, pillarId, pointId, 7) = p.x - (xIndex * xStep + xMin); + tensor.mutable_at(0, pillarId, pointId, 8) = p.y - (yIndex * yStep + yMin); pointId++; } @@ -329,6 +343,42 @@ float iou(const BoundingBox3D& box1, return area_overlap / (area_poly1 + area_poly2 - area_overlap); } + +// Calculates the IOU between two bounding boxes. 
+float iou(const BoundingBox3D& box1, + const BoundingBox3D& box2) +{ + const auto& box_as_vector = boundingBox3DToTopDown(box1); + const auto& box_as_vector_2 = boundingBox3DToTopDown(box2); + const auto& clipped_vector = sutherlandHodgmanClip(box_as_vector, box_as_vector_2); + + float area_poly1 = polygonArea(box_as_vector); + float area_poly2 = polygonArea(box_as_vector_2); + float area_overlap = polygonArea(clipped_vector); + + return area_overlap / (area_poly1 + area_poly2 - area_overlap); +} + + +// Calculates the IOU between two bounding boxes. +float iou3D(const BoundingBox3D& box1, + const BoundingBox3D& box2) +{ + const auto& box_as_vector = boundingBox3DToTopDown(box1); + const auto& box_as_vector_2 = boundingBox3DToTopDown(box2); + const auto& clipped_vector = sutherlandHodgmanClip(box_as_vector, box_as_vector_2); + + float volume_poly1 = polygonArea(box_as_vector) * box1.height; + float volume_poly2 = polygonArea(box_as_vector_2) * box2.height; + + float topZ = min(box1.height/2 + box1.z, box2.height/2 + box2.z); + float bottomZ = max( - box1.height/2 + box1.z, - box2.height/2 + box2.z); + + float volume_overlap = polygonArea(clipped_vector) * (topZ - bottomZ); + + return volume_overlap / (volume_poly1 + volume_poly2 - volume_overlap); +} + int clip(int n, int lower, int upper) { return std::max(lower, std::min(n, upper)); } @@ -462,6 +512,8 @@ std::tuple, int, int> createPillarsTarget(const pybind1 float maxIou = 0; BoundingBox3D bestAnchor = {}; int bestAnchorId = 0; + int bestAnchorXId = 0; + int bestAnchorYId = 0; for (int xId = xStart; xId < xEnd; xId++) // Iterate through every box within search diameter // In our example case, from 3 till 8 { @@ -487,6 +539,14 @@ std::tuple, int, int> createPillarsTarget(const pybind1 maxIou = iouOverlap; bestAnchor = anchorBox; bestAnchorId = anchorCount; + bestAnchorXId = xId; + bestAnchorYId = yId; + // if(printTime){ + // if(anchorCount == 3){ + // py::print("\nIoU old: " +std::to_string(iouOverlap) + " new: " + std::to_string(maxIou)); + // } + // } + } if (iouOverlap > positiveThreshold) // Accept the Anchor. Add the anchor details to the tensor. @@ -503,7 +563,8 @@ std::tuple, int, int> createPillarsTarget(const pybind1 tensor.mutable_at(objectCount, xId, yId, anchorCount, 5) = std::log(labelBox.width / anchorBox.width); tensor.mutable_at(objectCount, xId, yId, anchorCount, 6) = std::log(labelBox.height / anchorBox.height); - tensor.mutable_at(objectCount, xId, yId, anchorCount, 7) = std::sin(labelBox.yaw - anchorBox.yaw); //delta yaw + // tensor.mutable_at(objectCount, xId, yId, anchorCount, 7) = std::sin(labelBox.yaw - anchorBox.yaw); //delta yaw + tensor.mutable_at(objectCount, xId, yId, anchorCount, 7) = labelBox.yaw - anchorBox.yaw; //delta yaw if (labelBox.yaw > 0) // Is yaw > 0 { tensor.mutable_at(objectCount, xId, yId, anchorCount, 8) = 1; @@ -537,13 +598,19 @@ std::tuple, int, int> createPillarsTarget(const pybind1 if (printTime) { // std::cout << "\nThere was no sufficiently overlapping anchor anywhere for object " << objectCount << std::endl; - // py::print("There was no sufficiently overlapping anchor anywhere for object " +std::to_string(objectCount)); + py::print("\nThere was no sufficiently overlapping anchor anywhere for object " +std::to_string(objectCount)); // std::cout << "Best IOU was " << maxIou << ". Adding the best location regardless of threshold." 
<< std::endl; - // py::print("Best IOU was "+std::to_string(maxIou)+" Adding the best location regardless of threshold"); + py::print("\nBest IOU of anchor " + std::to_string(bestAnchorId) + " was "+std::to_string(maxIou)+" Adding the best location regardless of threshold"); + // py::print("\nBest IOU.x was "+std::to_string(bestAnchor.x)+" "); + // py::print("\nBest IOU.y was "+std::to_string(bestAnchor.y)+" "); + // py::print("\nBest IOU.z was "+std::to_string(bestAnchor.z)+" "); + // py::print("\nBest IOU.ry was "+std::to_string(bestAnchor.yaw)+" "); } - const auto xId = static_cast(std::floor((labelBox.x - xMin) / (xStep * downscalingFactor))); - const auto yId = static_cast(std::floor((labelBox.y - yMin) / (yStep * downscalingFactor))); + const auto xId = bestAnchorXId; + const auto yId = bestAnchorYId; + // const auto xId = static_cast(std::floor((labelBox.x - xMin) / (xStep * downscalingFactor))); + // const auto yId = static_cast(std::floor((labelBox.y - yMin) / (yStep * downscalingFactor))); const float diag = std::sqrt(std::pow(bestAnchor.width, 2) + std::pow(bestAnchor.length, 2)); tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 0) = 1; @@ -556,7 +623,8 @@ std::tuple, int, int> createPillarsTarget(const pybind1 tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 5) = std::log(labelBox.width / bestAnchor.width); tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 6) = std::log(labelBox.height / bestAnchor.height); - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 7) = std::sin(labelBox.yaw - bestAnchor.yaw); + // tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 7) = std::sin(labelBox.yaw - bestAnchor.yaw); + tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 7) = labelBox.yaw - bestAnchor.yaw; if (labelBox.yaw > 0) { tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 8) = 1; @@ -589,9 +657,14 @@ std::tuple, int, int> createPillarsTarget(const pybind1 return std::make_tuple(tensor, posCnt, negCnt); } +float cmath_sin(float value){ + return std::sin(value); +} + PYBIND11_MODULE(point_pillars, m) { m.def("createPillars", &createPillars, "Runs function to create point pillars input tensors"); m.def("createPillarsTarget", &createPillarsTarget, "Runs function to create point pillars output ground truth"); + m.def("cmath_sin", &cmath_sin, "Runs function to compute sine"); } diff --git a/src/point_pillars_v2.cpp b/src/point_pillars_v2.cpp deleted file mode 100644 index 85a240e..0000000 --- a/src/point_pillars_v2.cpp +++ /dev/null @@ -1,634 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -// #include -namespace py = pybind11; - -struct IntPairHash { - std::size_t operator()(const std::pair &p) const { - assert(sizeof(std::size_t)>=8); - //Shift first integer over to make room for the second integer. The two are - //then packed side by side. 
- return (((uint64_t)p.first)<<32) | ((uint64_t)p.second); - } -}; - -struct PillarPoint { - float x; - float y; - float z; - float intensity; - float xc; - float yc; - float zc; - float xp; - float yp; -}; - -pybind11::tuple createPillars(pybind11::array_t points, - int maxPointsPerPillar, - int maxPillars, - float xStep, - float yStep, - float xMin, - float xMax, - float yMin, - float yMax, - float zMin, - float zMax, - bool printTime = false) -{ - std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); - - if (points.ndim() != 2 || points.shape()[1] != 4) - { - throw std::runtime_error("numpy array with shape (n, 4) expected (n being the number of points)"); - } - - std::unordered_map, std::vector, IntPairHash> map; - - for (int i = 0; i < points.shape()[0]; ++i) - { - if ((points.at(i, 0) < xMin) || (points.at(i, 0) >= xMax) || \ - (points.at(i, 1) < yMin) || (points.at(i, 1) >= yMax) || \ - (points.at(i, 2) < zMin) || (points.at(i, 2) >= zMax)) - { - continue; - } - - auto xIndex = static_cast(std::floor((points.at(i, 0) - xMin) / xStep)); - auto yIndex = static_cast(std::floor((points.at(i, 1) - yMin) / yStep)); - - PillarPoint p = { - points.at(i, 0), // x - points.at(i, 1), // y - points.at(i, 2), // z - points.at(i, 3), // intensity - 0, // xc - 0, // yc - 0, // zc - 0, // xp - 0, // yp - }; - - map[{xIndex, yIndex}].emplace_back(p); - } - - pybind11::array_t tensor; - pybind11::array_t indices; - - tensor.resize({1, maxPillars, maxPointsPerPillar, 9}); - indices.resize({1, maxPillars, 3}); - - int pillarId = 0; - for (auto& pair: map) - { - if (pillarId >= maxPillars) - { - break; - } - - float xMean = 0; - float yMean = 0; - float zMean = 0; - for (const auto& p: pair.second) - { - xMean += p.x; - yMean += p.y; - zMean += p.z; - } - xMean /= pair.second.size(); - yMean /= pair.second.size(); - zMean /= pair.second.size(); - - for (auto& p: pair.second) - { - p.xc = p.x - xMean; - p.yc = p.y - yMean; - p.zc = p.z - zMean; - } - - auto xIndex = static_cast(std::floor((xMean - xMin) / xStep)); - auto yIndex = static_cast(std::floor((yMean - yMin) / yStep)); - auto zMid = (zMax - zMin) * 0.5f; - indices.mutable_at(0, pillarId, 1) = xIndex; - indices.mutable_at(0, pillarId, 2) = yIndex; - - int pointId = 0; - for (const auto& p: pair.second) - { - if (pointId >= maxPointsPerPillar) - { - break; - } - - // tensor.mutable_at(0, pillarId, pointId, 0) = p.x - (xIndex * xStep + xMin); - // tensor.mutable_at(0, pillarId, pointId, 1) = p.y - (yIndex * yStep + yMin); - // tensor.mutable_at(0, pillarId, pointId, 2) = p.z - zMid; - // tensor.mutable_at(0, pillarId, pointId, 3) = p.intensity; - // tensor.mutable_at(0, pillarId, pointId, 4) = p.xc; - // tensor.mutable_at(0, pillarId, pointId, 5) = p.yc; - // tensor.mutable_at(0, pillarId, pointId, 6) = p.zc; - - tensor.mutable_at(0, pillarId, pointId, 0) = p.x; - tensor.mutable_at(0, pillarId, pointId, 1) = p.y; - tensor.mutable_at(0, pillarId, pointId, 2) = p.z; - tensor.mutable_at(0, pillarId, pointId, 3) = p.intensity; - tensor.mutable_at(0, pillarId, pointId, 4) = p.xc; - tensor.mutable_at(0, pillarId, pointId, 5) = p.yc; - tensor.mutable_at(0, pillarId, pointId, 6) = p.zc; - tensor.mutable_at(0, pillarId, pointId, 7) = p.x - (xIndex * xStep + xMin); - tensor.mutable_at(0, pillarId, pointId, 8) = p.y - (yIndex * yStep + yMin); - - pointId++; - } - - pillarId++; - } - - pybind11::tuple result = pybind11::make_tuple(tensor, indices); - - std::chrono::high_resolution_clock::time_point t2 = 
std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast( t2 - t1 ).count(); - if (printTime) - std::cout << "createPillars took: " << static_cast(duration) / 1e6 << " seconds" << std::endl; - - return result; -} - -struct BoundingBox3D -{ - float x; - float y; - float z; - float length; - float width; - float height; - float yaw; - float classId; -}; - -struct Point2D { - float x; - float y; -}; - -typedef std::vector Polyline2D; - -// Returns x-value of point of intersection of two lines -float xIntersect(float x1, float y1, float x2, float y2, - float x3, float y3, float x4, float y4) -{ - float num = (x1*y2 - y1*x2) * (x3-x4) - (x1-x2) * (x3*y4 - y3*x4); - float den = (x1-x2) * (y3-y4) - (y1-y2) * (x3-x4); - return num/den; -} - -// Returns y-value of point of intersection of two lines -float yIntersect(float x1, float y1, float x2, float y2, - float x3, float y3, float x4, float y4) -{ - float num = (x1*y2 - y1*x2) * (y3-y4) - (y1-y2) * (x3*y4 - y3*x4); - float den = (x1-x2) * (y3-y4) - (y1-y2) * (x3-x4); - return num/den; -} - -// Returns area of polygon using the shoelace method -float polygonArea(const Polyline2D &polygon) -{ - float area = 0.0; - - size_t j = polygon.size()-1; - for (size_t i = 0; i < polygon.size(); i++) - { - area += (polygon[j].x + polygon[i].x) * (polygon[j].y - polygon[i].y); - j = i; // j is previous vertex to i - } - - return std::abs(area / 2.0); // Return absolute value -} - -float rotatedX(float x, float y, float angle) -{ - return x * std::cos(angle) - y * std::sin(angle); -} - -float rotatedY(float x, float y, float angle) -{ - return x * std::sin(angle) + y * std::cos(angle); -} - -// Construct bounding box in 2D, coordinates are returned in clockwise order -Polyline2D boundingBox3DToTopDown(const BoundingBox3D &box1) -{ - Polyline2D box; - box.push_back({rotatedX(-0.5 * box1.width, 0.5 * box1.length, - box1.yaw) + box1.x, - rotatedY(-0.5 * box1.width, 0.5 * box1.length, - box1.yaw) + box1.y}); - - box.push_back({rotatedX(0.5 * box1.width, 0.5 * box1.length, - box1.yaw) + box1.x, - rotatedY(0.5 * box1.width, 0.5 * box1.length, - box1.yaw) + box1.y}); - - box.push_back({rotatedX(0.5 * box1.width, -0.5 * box1.length, - box1.yaw) + box1.x, - rotatedY(0.5 * box1.width, -0.5 * box1.length, - box1.yaw) + box1.y}); - - box.push_back({rotatedX(-0.5 * box1.width, -0.5 * box1.length, - box1.yaw) + box1.x, - rotatedY(-0.5 * box1.width, -0.5 * box1.length, - box1.yaw) + box1.y}); - - return box; -} - -// This functions clips all the edges w.r.t one Clip edge of clipping area -// Returns a clipped polygon... -Polyline2D clip(const Polyline2D &poly_points, - float x1, - float y1, - float x2, - float y2) -{ - Polyline2D new_points; - - for (size_t i = 0; i < poly_points.size(); i++) - { - // (ix,iy),(kx,ky) are the co-ordinate values of the points - // i and k form a line in polygon - size_t k = (i+1) % poly_points.size(); - float ix = poly_points[i].x, iy = poly_points[i].y; - float kx = poly_points[k].x, ky = poly_points[k].y; - - // Calculating position of first point w.r.t. clipper line - float i_pos = (x2-x1) * (iy-y1) - (y2-y1) * (ix-x1); - - // Calculating position of second point w.r.t. 
clipper line - float k_pos = (x2-x1) * (ky-y1) - (y2-y1) * (kx-x1); - - // Case 1 : When both points are inside - if (i_pos < 0 && k_pos < 0) - { - //Only second point is added - new_points.push_back({kx,ky}); - } - - // Case 2: When only first point is outside - else if (i_pos >= 0 && k_pos < 0) - { - // Point of intersection with edge - // and the second point is added - new_points.push_back({xIntersect(x1, y1, x2, y2, ix, iy, kx, ky), - yIntersect(x1, y1, x2, y2, ix, iy, kx, ky)}); - new_points.push_back({kx,ky}); - - } - - // Case 3: When only second point is outside - else if (i_pos < 0 && k_pos >= 0) - { - //Only point of intersection with edge is added - new_points.push_back({xIntersect(x1, y1, x2, y2, ix, iy, kx, ky), - yIntersect(x1, y1, x2, y2, ix, iy, kx, ky)}); - - } - // Case 4: When both points are outside - else - { - //No points are added - } - } - - return new_points; -} - -// Implements Sutherland–Hodgman algorithm -// Returns a polygon with the intersection between two polygons. -Polyline2D sutherlandHodgmanClip(const Polyline2D &poly_points_vector, - const Polyline2D &clipper_points) -{ - Polyline2D clipped_poly_points_vector = poly_points_vector; - for (size_t i=0; i, int, int> createPillarsTarget(const pybind11::array_t& objectPositions, - const pybind11::array_t& objectDimensions, - const pybind11::array_t& objectYaws, - const pybind11::array_t& objectClassIds, - const pybind11::array_t& anchorDimensions, - const pybind11::array_t& anchorZHeights, - const pybind11::array_t& anchorYaws, - float positiveThreshold, - float negativeThreshold, - int nbClasses, - int downscalingFactor, - float xStep, - float yStep, - float xMin, - float xMax, - float yMin, - float yMax, - float zMin, - float zMax, - bool printTime = false) -{ - - std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); - - const auto xSize = static_cast(std::floor((xMax - xMin) / (xStep * downscalingFactor))); -// py::print("xSize", xSize); - const auto ySize = static_cast(std::floor((yMax - yMin) / (yStep * downscalingFactor))); -// py::print("ySize", ySize); - - const int nbAnchors = anchorDimensions.shape()[0]; //4 Number of anchors -// py::print("nbAnchors", nbAnchors); -// Anchor length - - if (nbAnchors <= 0) - { - throw std::runtime_error("Anchor length is zero"); - } - - const int nbObjects = objectDimensions.shape()[0]; //6 Number of labels inside a label.txt file -// BB dimensions from the label file - if (nbObjects <= 0) - { - throw std::runtime_error("Object length is zero"); - } -// py::print("nbObjects", nbObjects); - - // parse numpy arrays -// Preparing the anchor bounding box - std::vector anchorBoxes = {}; - std::vector anchorDiagonals; - for (int i = 0; i < nbAnchors; ++i) - { - BoundingBox3D anchorBox = {}; - anchorBox.x = 0; - anchorBox.y = 0; - anchorBox.length = anchorDimensions.at(i, 0); - anchorBox.width = anchorDimensions.at(i, 1); - anchorBox.height = anchorDimensions.at(i, 2); - anchorBox.z = anchorZHeights.at(i); - anchorBox.yaw = anchorYaws.at(i); - anchorBoxes.emplace_back(anchorBox); // Appends a new anchorBox to the AnchorBoxes container - // Note that anchor box doesn't have a classId as of now. 
- anchorDiagonals.emplace_back(std::sqrt(std::pow(anchorBox.width, 2) + std::pow(anchorBox.length, 2))); - } - -// Preparing the label bounding box - std::vector labelBoxes = {}; - for (int i = 0; i < nbObjects; ++i) - { - float x = objectPositions.at(i, 0); - float y = objectPositions.at(i, 1); - if (x < xMin | x > xMax | y < yMin | y > yMax) - { - continue; - } - BoundingBox3D labelBox = {}; - labelBox.x = x; - labelBox.y = y; - labelBox.z = objectPositions.at(i, 2); - labelBox.length = objectDimensions.at(i, 0); - labelBox.width = objectDimensions.at(i, 1); - labelBox.height = objectDimensions.at(i, 2); - labelBox.yaw = objectYaws.at(i); - labelBox.classId = objectClassIds.at(i); - labelBoxes.emplace_back(labelBox); - } - - pybind11::array_t tensor; - tensor.resize({nbObjects, xSize, ySize, nbAnchors, 10}); //Tensor of size (6,252,252,4,10) for first file - - pybind11::buffer_info tensor_buffer = tensor.request(); - float *ptr1 = (float *) tensor_buffer.ptr; - // Zero filling the tensor. Every element is presently zero - for (size_t idx = 0; idx < nbObjects * xSize * ySize * nbAnchors * 10; idx++) - { - ptr1[idx] = 0; - } - - int posCnt = 0; - int negCnt = 0; - int objectCount = 0; - if (printTime) - { -// std::cout << "Received " << labelBoxes.size() << " objects" << std::endl; -// py::print("Received "+str(labelBoxes.size())+" objects"); - } - for (const auto& labelBox: labelBoxes) //For every label box which is a 3d bounding box - { - // zone-in on potential spatial area of interest - // Length of (width,length) axis diagonal. - float objectDiameter = std::sqrt(std::pow(labelBox.width, 2) + std::pow(labelBox.length, 2)); - // Offset = Number of grid boxes that can fit on the object diameter - const auto x_offset = static_cast(std::ceil(objectDiameter / (xStep * downscalingFactor))); - const auto y_offset = static_cast(std::ceil(objectDiameter / (yStep * downscalingFactor))); - // Xc = Number of grid boxes that can fit between Xmin (Ymin) and Label's x (y) coordinate - const auto xC = static_cast(std::floor((labelBox.x - xMin) / (xStep * downscalingFactor))); - const auto yC = static_cast(std::floor((labelBox.y - yMin) / (yStep * downscalingFactor))); - // X(Y)Start = Start from Xc (Yc) - Number of boxes in object's diameter. - // For example the object is located at 5 unites and is 2 unites long. Then X(Y)start will begin - // the search from 3 - const auto xStart = clip(xC - x_offset, 0, xSize); - const auto yStart = clip(yC - y_offset, 0, ySize); - // Similarly end the search at 8 units. Because the object cannot extend beyond that. 
- const auto xEnd = clip(xC + x_offset, 0, xSize); - const auto yEnd = clip(yC + y_offset, 0, ySize); - - float maxIou = 0; - BoundingBox3D bestAnchor = {}; - int bestAnchorId = 0; - int bestAnchorXId = 0; - int bestAnchorYId = 0; - for (int xId = xStart; xId < xEnd; xId++) // Iterate through every box within search diameter - // In our example case, from 3 till 8 - { - const float x = xId * xStep * downscalingFactor + xMin; - // Getting the real world x coordinate - for (int yId = yStart; yId < yEnd; yId++) // Iterate through every box within search diamter in y axis - { - const float y = yId * yStep * downscalingFactor + yMin; - // Get the real world y coordinates - int anchorCount = 0; - for (auto& anchorBox: anchorBoxes) // For every anchor box (4 in our case) - // Note that we are checking every anchor box for every label in the file - { - anchorBox.x = x; // Assign the real world x and y coordinate to the anchor box - anchorBox.y = y; // Note that anchor boxes originally didn't have Xs and Ys. - // This is because we need ot check them along the X-Y grid. - // However, they did have a z value attached to them. - - const float iouOverlap = iou(anchorBox, labelBox); // Get IOU between two 3D boxes. - - if (maxIou < iouOverlap) - { - maxIou = iouOverlap; - bestAnchor = anchorBox; - bestAnchorId = anchorCount; - bestAnchorXId = xId; - bestAnchorYId = yId; - // if(printTime){ - // if(anchorCount == 3){ - // py::print("\nIoU old: " +std::to_string(iouOverlap) + " new: " + std::to_string(maxIou)); - // } - // } - - } - - if (iouOverlap > positiveThreshold) // Accept the Anchor. Add the anchor details to the tensor. - { - // Tensor at CurrentObject Id, xth grid cell, yth grid cell, currentAnchor, 0 - tensor.mutable_at(objectCount, xId, yId, anchorCount, 0) = 1; - - auto diag = anchorDiagonals[anchorCount]; - tensor.mutable_at(objectCount, xId, yId, anchorCount, 1) = (labelBox.x - anchorBox.x) / diag; // delta x,y,z - tensor.mutable_at(objectCount, xId, yId, anchorCount, 2) = (labelBox.y - anchorBox.y) / diag; - tensor.mutable_at(objectCount, xId, yId, anchorCount, 3) = (labelBox.z - anchorBox.z) / anchorBox.height; - - tensor.mutable_at(objectCount, xId, yId, anchorCount, 4) = std::log(labelBox.length / anchorBox.length); // delta l,w,h - tensor.mutable_at(objectCount, xId, yId, anchorCount, 5) = std::log(labelBox.width / anchorBox.width); - tensor.mutable_at(objectCount, xId, yId, anchorCount, 6) = std::log(labelBox.height / anchorBox.height); - - // tensor.mutable_at(objectCount, xId, yId, anchorCount, 7) = std::sin(labelBox.yaw - anchorBox.yaw); //delta yaw - tensor.mutable_at(objectCount, xId, yId, anchorCount, 7) = labelBox.yaw - anchorBox.yaw; //delta yaw - if (labelBox.yaw > 0) // Is yaw > 0 - { - tensor.mutable_at(objectCount, xId, yId, anchorCount, 8) = 1; - } - else - { - tensor.mutable_at(objectCount, xId, yId, anchorCount, 8) = 0; - } - - tensor.mutable_at(objectCount, xId, yId, anchorCount, 9) = labelBox.classId; - - } - else if (iouOverlap < negativeThreshold) - { - tensor.mutable_at(objectCount, xId, yId, anchorCount, 0) = 0; - } - else - { - tensor.mutable_at(objectCount, xId, yId, anchorCount, 0) = -1; - } - - anchorCount++; - } - } - } - - if (maxIou < positiveThreshold) // Comparing maxIOU for that object obtained after checking with every anchor box - // If none of the anchors passed the threshold, then we place the best anchor details for that object. 
- { - negCnt++; - // if (printTime) - // { - // // std::cout << "\nThere was no sufficiently overlapping anchor anywhere for object " << objectCount << std::endl; - // py::print("\nThere was no sufficiently overlapping anchor anywhere for object " +std::to_string(objectCount)); - // // std::cout << "Best IOU was " << maxIou << ". Adding the best location regardless of threshold." << std::endl; - // py::print("\nBest IOU was "+std::to_string(maxIou)+" Adding the best location regardless of threshold"); - // py::print("\nBest IOU.x was "+std::to_string(bestAnchor.x)+" "); - // py::print("\nBest IOU.y was "+std::to_string(bestAnchor.y)+" "); - // py::print("\nBest IOU.z was "+std::to_string(bestAnchor.z)+" "); - // py::print("\nBest IOU.ry was "+std::to_string(bestAnchor.yaw)+" "); - // } - - const auto xId = bestAnchorXId; - const auto yId = bestAnchorYId; - // const auto xId = static_cast(std::floor((labelBox.x - xMin) / (xStep * downscalingFactor))); - // const auto yId = static_cast(std::floor((labelBox.y - yMin) / (yStep * downscalingFactor))); - const float diag = std::sqrt(std::pow(bestAnchor.width, 2) + std::pow(bestAnchor.length, 2)); - - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 0) = 1; - - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 1) = (labelBox.x - bestAnchor.x) / diag; - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 2) = (labelBox.y - bestAnchor.y) / diag; - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 3) = (labelBox.z - bestAnchor.z) / bestAnchor.height; - - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 4) = std::log(labelBox.length / bestAnchor.length); - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 5) = std::log(labelBox.width / bestAnchor.width); - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 6) = std::log(labelBox.height / bestAnchor.height); - - // tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 7) = std::sin(labelBox.yaw - bestAnchor.yaw); - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 7) = labelBox.yaw - bestAnchor.yaw; - if (labelBox.yaw > 0) - { - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 8) = 1; - } - else - { - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 8) = 0; - } -// Class id is the classification label (0,1,2,3) - tensor.mutable_at(objectCount, xId, yId, bestAnchorId, 9) = labelBox.classId; - } - else - { - posCnt++; - if (printTime) - { - std::cout << "\nAt least 1 anchor was positively matched for object " << objectCount << std::endl; - std::cout << "Best IOU was " << maxIou << "." 
<< std::endl; - } - } - - objectCount++; - } - - std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast( t2 - t1 ).count(); - if (printTime) - std::cout << "createPillarsTarget took: " << static_cast(duration) / 1e6 << " seconds" << std::endl; - - return std::make_tuple(tensor, posCnt, negCnt); -} - -float cmath_sin(float value){ - return std::sin(value); -} - - -PYBIND11_MODULE(point_pillars_v2, m) -{ - m.def("createPillars", &createPillars, "Runs function to create point pillars input tensors"); - m.def("createPillarsTarget", &createPillarsTarget, "Runs function to create point pillars output ground truth"); - m.def("cmath_sin", &cmath_sin, "Runs function to compute sine"); -} From 4ab1d55599245c950ccc62a0e16bfe6294d0f280 Mon Sep 17 00:00:00 2001 From: tjtanaa Date: Mon, 23 Nov 2020 15:57:26 +0800 Subject: [PATCH 3/4] Evaluation; evaluate the output using official cpp code --- inference_utils_v2.py | 19 ++- point_pillars_custom_processors_v2_2.py | 62 +--------- point_pillars_evaluation_v2_2.py | 158 ++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 64 deletions(-) create mode 100644 point_pillars_evaluation_v2_2.py diff --git a/inference_utils_v2.py b/inference_utils_v2.py index 685b1ec..3d5eb4a 100644 --- a/inference_utils_v2.py +++ b/inference_utils_v2.py @@ -96,14 +96,12 @@ def limit_period(val, offset=0.5, period=np.pi): return val - np.floor(val / period + offset) * period def inverse_yaw_element(bb_yaw): - - bb_yaw -= np.pi / 2 while bb_yaw > np.pi: - print("larger than pi") + # print("larger than pi") bb_yaw -= (np.pi * 2) while bb_yaw < -np.pi: - print("smaller than -pi") + # print("smaller than -pi") bb_yaw += (np.pi * 2) return bb_yaw @@ -162,6 +160,19 @@ def generate_bboxes_from_pred_and_np_array(occ, pos, siz, ang, hdg, clf, anchor_ return predicted_boxes, np.array(predicted_boxes_list) +def convert_boxes_to_list(set_boxes): + # (B, N) + batch_predicted_boxes_list = [] + for batch_idx in range(len(set_boxes)): + predicted_boxes_list = [] + + for box in set_boxes[batch_idx]: + + predicted_boxes_list.append([box.x, box.y, box.z, box.length, box.width, box.height, + box.yaw, box.heading, box.cls, box.conf]) + + batch_predicted_boxes_list.append(predicted_boxes_list) + return batch_predicted_boxes_list class GroundTruthGenerator(DataProcessor): """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ diff --git a/point_pillars_custom_processors_v2_2.py b/point_pillars_custom_processors_v2_2.py index ae2b17c..2c0886c 100644 --- a/point_pillars_custom_processors_v2_2.py +++ b/point_pillars_custom_processors_v2_2.py @@ -70,9 +70,6 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): # filter labels by classes (cars, pedestrians and Trams) # Label has 4 properties (Classification (0th index of labels file), # centroid coordinates, dimensions, yaw) - # labels = list(filter(lambda x: x.classification in self.classes, labels)) - - if len(gt_boxes_3d) == 0: pX, pY = int(self.Xn / self.downscaling_factor), int(self.Yn / self.downscaling_factor) @@ -82,12 +79,6 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): np.zeros((pX, pY, a), dtype='float32'), np.zeros((pX, pY, a, self.nb_classes), dtype='float64') # For each label file, generate these properties except for the Don't care class - - # target_positions = np.array([label.centroid for label in labels], dtype=np.float32) - # 
target_dimension = np.array([label.dimension for label in labels], dtype=np.float32) - # target_yaw = np.array([label.yaw for label in labels], dtype=np.float32) - # target_class = np.array([self.classes[label.classification] for label in labels], dtype=np.int32) - target_positions = gt_boxes_3d[:,:3] target_dimension = gt_boxes_3d[:,3:6] # don't have to translate again target_yaw = gt_boxes_3d[:, 6] @@ -125,13 +116,11 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): self.neg_cnt += neg # return a merged target view for all objects in the ground truth and get categorical labels - # print("target.shape: ", target.shape) + sel = select_best_anchors(target) - # print("self.shape: ", sel[...,0].shape) ohe = tf.keras.utils.to_categorical(sel[..., 9], num_classes=self.nb_classes, dtype='float64') # print("self.shape: ", sel[...,0].shape) - # print("ohe.shape: ", ohe.shape) - # print("sel[8].shape: ",sel[..., 8].shape) + return sel[..., 0], sel[..., 1:4], sel[..., 4:7], sel[..., 7], sel[..., 8], ohe @@ -149,11 +138,8 @@ def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str random_select=random_select, gt_database_dir=gt_database_dir, aug_hard_ratio=aug_hard_ratio, **kwargs ) - # self.data_reader = data_reader self.batch_size = batch_size self.sample_id_list=self.get_sample_id_list() - # self.split = split - # print("CustomDataGenerator: " ,self.split) def get_sample(self, index): @@ -165,7 +151,6 @@ def __len__(self): def __getitem__(self, batch_id: int): file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) - # print("inside getitem") pillars = [] voxels = [] occupancy = [] @@ -229,9 +214,7 @@ def __getitem__(self, batch_id: int): return [pillars, voxels] def on_epoch_end(self): - # print("inside epoch") if self.split=='train' or self.split =='val': - # pass self.sample_id_list=shuffle(self.sample_id_list) @@ -251,9 +234,6 @@ def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str # self.data_reader = data_reader self.batch_size = batch_size self.sample_id_list=self.get_sample_id_list() - # self.split = split - # self.split = split - # print("AnalyseCustomDataGenerator: " ,self.split) def _get_sample(self, index): @@ -265,7 +245,6 @@ def __len__(self): def __getitem__(self, batch_id: int): file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) - # print("inside getitem") pillars = [] voxels = [] occupancy = [] @@ -279,23 +258,15 @@ def __getitem__(self, batch_id: int): gt_boxes3d_ = [] sample_ = [] - # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" - # # Initialize and setup output directory. 
- # Converter = PointvizConverter(save_viz_path) - for i in file_ids: - # print(i) - # print(type(i)) sample = self._get_sample(i) # For each file, dividing the space into a x-y grid to create pillars pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) - # print(pts_lidar.shape) pts_input = np.concatenate((pts_lidar, sample['pts_features']), axis=1) # (N, C) gt_boxes3d_xyz = sample['calib'].rect_to_lidar(sample['gt_boxes3d'][:,:3]) - # print(gt_boxes3d_xyz.shape) gt_boxes3d = np.concatenate(( gt_boxes3d_xyz[:,0,np.newaxis], # 0 x @@ -306,44 +277,17 @@ def __getitem__(self, batch_id: int): sample['gt_boxes3d'][:,3,np.newaxis], # 5 h # same as the original label -sample['gt_boxes3d'][:,6,np.newaxis], # 6 ry ), axis=1) - - # print(type(gt_boxes3d)) - # gt_boxes3d = self.limit_yaw(gt_boxes3d) - - # bbox_params = self.convert_labels_into_point_viz_format(gt_boxes3d) - # print(bbox_params.shape) - # Converter.compile("custom_sample_{}".format(i), coors=pts_input[:,:3], intensity=pts_input[:,3], - # bbox_params=bbox_params) - - - # exit() - - # print(pts_input.shape) # Voxels are the pillar ids pillars_, voxels_ = self.make_point_pillars(pts_input) - # print(pillars_.shape, voxels_.shape) - # for i in range(10): - # print(pillars_[0,0,i,:]) - # print(np.sum(pillars_ > 0)) - # exit() - pillars.append(pillars_) voxels.append(voxels_) - # print(sample['gt_cls_type_list']) - # print("split: ", self.split) if self.split=='train' or self.split =='val': - # print(len(gt_boxes3d), ", ", len(sample['gt_cls_type_list'])) if (len(gt_boxes3d) == 0): print("file id: ", i, " has zero gt label") occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( gt_boxes3d, sample['gt_cls_type_list']) - # print(len(a)) - # if - - # print(occupancy_.shape, position_.shape, size_.shape, angle_.shape, heading_.shape, classification_.shape) - occupancy.append(occupancy_) position.append(position_) @@ -356,8 +300,6 @@ def __getitem__(self, batch_id: int): gt_boxes3d_.append(gt_boxes3d) pts_input_.append(pts_input) - # exit() - pillars = np.concatenate(pillars, axis=0) voxels = np.concatenate(voxels, axis=0) diff --git a/point_pillars_evaluation_v2_2.py b/point_pillars_evaluation_v2_2.py new file mode 100644 index 0000000..01fd5aa --- /dev/null +++ b/point_pillars_evaluation_v2_2.py @@ -0,0 +1,158 @@ + + +import os +from glob import glob +import numpy as np +import tensorflow as tf +from point_pillars_custom_processors_v2_2 import CustomDataGenerator, AnalyseCustomDataGenerator +from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator +from inference_utils_v2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array, convert_boxes_to_list +from readers import KittiDataReader +from config_v2_2 import Parameters +from network_v2_2 import build_point_pillar_graph +from datetime import datetime + +from det3d.kitti_dataset.utils.evaluation import save_kitti_format, save_kitti_format_for_evaluation + +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" +MODEL_ROOT = "./logs_Car_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val_new_network" + +KITTI_EVALUATION_OUTPUT = os.path.join(MODEL_ROOT, "Evaluation") +if not os.path.exists(KITTI_EVALUATION_OUTPUT): + os.makedirs(KITTI_EVALUATION_OUTPUT) + +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "3" + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +if 
__name__ == "__main__": + + params = Parameters() + pillar_net = build_point_pillar_graph(params) + pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + pillar_net.summary() + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + save_viz_path = os.path.join("/home/tan/tjtanaa/PointPillars/visualization", MODEL_ROOT.split('/')[-1]) + # Initialize and setup output directory. + Converter = PointvizConverter(save_viz_path) + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + + validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + npoints=20000, split='train_val_test',random_select=False, classes=list(params.classes_map.keys())) + inference_duration = [] + sample_index = 0 # has to be controlled manually to ensure that the sequence number is continuous + + for batch_idx in range(0,len(validation_gen)): + [pillars, voxels], [occupancy_, position_, size_, angle_, heading_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] + + start=datetime.now() + + occupancy, position, size, angle, heading = pillar_net.predict([pillars, voxels]) + + inference_duration.append( datetime.now()-start) + + classification = np.zeros(shape=np.array(occupancy).shape) + classification_ = classification + + set_boxes, confidences = [], [] + loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 + for i in range(loop_range): + set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + heading[i], + classification[i], params.anchor_dims, occ_threshold=0.5) + + + _, decoded_gt_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy_[i], position_[i], size_[i], angle_[i], + heading_[i], + classification_[i], params.anchor_dims, occ_threshold=0.4) + + # gt_boxes3d_ = gt_boxes3d[i] + gt_boxes3d_ = decoded_gt_boxes3d + + print(gt_boxes3d_.shape) + if(len(gt_boxes3d_) == 0): + gt_bbox_params_list = [] + else: + gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], + gt_boxes3d_[:,1], gt_boxes3d_[:,2] , + gt_boxes3d_[:,0], + gt_boxes3d_[:,6]], axis=1) + + + gt_bbox_params_list = gt_bbox_params.tolist() + # gt_bbox_params_list = [] + for k in range(len(gt_bbox_params_list)): + msg = "%.5f, %s, %.5f"%(decoded_gt_boxes3d[k,9], params.map_classes[int(decoded_gt_boxes3d[k,8])], decoded_gt_boxes3d[k,6]) + # msg = "%.5f, %.5f"%(gt_bbox_params_list[k][3],gt_bbox_params_list[k][5]) + gt_bbox_params_list[k].append("Green") + # gt_bbox_params_list[k].append("1.0") + gt_bbox_params_list[k].append(msg) + + if len(set_box) > 0: + + + # NMS + # set_box + # print("start nms") + confidence = [float(box.conf) for box in set_box] + nms_boxes = rotational_nms([set_box], [confidence], occ_threshold=0.5, nms_iou_thr=0.5) + + predicted_boxes3d_list = convert_boxes_to_list(nms_boxes) + + predicted_boxes3d = np.array(predicted_boxes3d_list[0]) + predicted_boxes3d_ = predicted_boxes3d + + print("batch_idx: ", batch_idx * params.batch_size + i, " has ", predicted_boxes3d_.shape, "predictions") + + bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], + predicted_boxes3d_[:,1], predicted_boxes3d_[:,2] , + predicted_boxes3d_[:,0], + predicted_boxes3d_[:,6]], axis=1) + + + bbox_params_list = bbox_params.tolist() + # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + for k in range(predicted_boxes3d.shape[0]): + msg = "%.5f, %s, 
%.5f"%(predicted_boxes3d[k,9],params.map_classes[int(predicted_boxes3d[k,8])], predicted_boxes3d[k,6]) + bbox_params_list[k].append("Magenta") + bbox_params_list[k].append(msg) + # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + "=" + params.map_classes[int(predicted_boxes3d[k,8])]) + gt_bbox_params_list.append(bbox_params_list[k]) + + + # save as kitti format for evaluation + cur_sample_id = batch_idx * params.batch_size + i + sample_file_name = validation_gen.sample_id_list[cur_sample_id] + calib = sample[i]['calib'] + # cur_boxes3d = cur_boxes3d.cpu().numpy() + + cur_boxes3d_xyz = calib.lidar_to_rect(predicted_boxes3d[:, 0:3]) + + cur_boxes3d = np.concatenate(( + cur_boxes3d_xyz[:,0,np.newaxis], # 0 x + cur_boxes3d_xyz[:,1,np.newaxis] + predicted_boxes3d[:,5,np.newaxis] / 2, # 1 y + cur_boxes3d_xyz[:,2,np.newaxis], # 2 z + predicted_boxes3d[:,5,np.newaxis], # 3 l # same as the original label + predicted_boxes3d[:,4,np.newaxis], # 4 w # same as the original label + predicted_boxes3d[:,3,np.newaxis], # 5 h # same as the original label + -predicted_boxes3d[:,6,np.newaxis], # 6 ry + ), axis=1) + cur_scores_raw = predicted_boxes3d[:,-1] + image_shape = validation_gen.get_image_shape(sample_file_name) + labels_obj = validation_gen.get_label(sample_file_name) + classes = ['Car' for i in range(len(predicted_boxes3d))] + save_kitti_format_for_evaluation(sample_index, calib, cur_boxes3d, KITTI_EVALUATION_OUTPUT, cur_scores_raw, image_shape, classes, labels_obj) + sample_index += 1 + + coor = pts_input[i][:,[1,2,0]] + Converter.compile("evaluation_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + bbox_params=gt_bbox_params_list) + # print("Average runtime speed: ", np.mean(inference_duration[20:])) + From 78c8d8f907ba364136b6615986ac80dbec81390d Mon Sep 17 00:00:00 2001 From: tjtanaa Date: Wed, 25 Nov 2020 13:40:49 +0800 Subject: [PATCH 4/4] Added (single class no augmentation) mtr training, evaluation, and prediction script --- config_mtr_v1.py | 159 ++++++++++++ config_v2_2.py | 3 +- inference_utils_mtr_v1.py | 190 ++++++++++++++ inference_utils_v2.py | 30 --- inference_utils_v2_2.py | 190 ++++++++++++++ mtr_point_pillars_evaluation_v1.py | 164 ++++++++++++ mtr_point_pillars_prediction_v1.py | 138 ++++++++++ ...y => mtr_point_pilllars_training_run_v1.py | 62 +++-- ...m_processors_v2.py => mtr_processors_v1.py | 243 +++++++++--------- network_v2.py | 4 +- network_v2_2.py | 12 +- point_pillars_custom_prediction.py | 157 ----------- point_pillars_custom_prediction_v2_2.py | 41 +-- point_pillars_custom_processors_v2_2.py | 9 +- point_pillars_evaluation_v2_2.py | 6 +- point_pillars_test.py | 70 ----- point_pillars_training_custom_run_v2_2.py | 31 ++- point_pillars_training_run.py | 73 ------ point_pillars_visualize_input.py | 3 - 19 files changed, 1057 insertions(+), 528 deletions(-) create mode 100644 config_mtr_v1.py create mode 100644 inference_utils_mtr_v1.py create mode 100644 inference_utils_v2_2.py create mode 100644 mtr_point_pillars_evaluation_v1.py create mode 100644 mtr_point_pillars_prediction_v1.py rename point_pillars_training_custom_run_v2.py => mtr_point_pilllars_training_run_v1.py (59%) rename point_pillars_custom_processors_v2.py => mtr_processors_v1.py (62%) delete mode 100644 point_pillars_custom_prediction.py delete mode 100644 point_pillars_test.py delete mode 100644 point_pillars_training_run.py diff --git a/config_mtr_v1.py b/config_mtr_v1.py new file mode 100644 index 0000000..a1207f5 --- /dev/null +++ b/config_mtr_v1.py 
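
The new config_mtr_v1.py below defines a 20.16 m x 20.16 m x-y detection area at 0.04 m resolution with pedestrian-only classes and anchors. A quick sanity-check sketch of the derived grid sizes, using only constants that appear in the file:

x_min, x_max, x_step = -10.08, 10.08, 0.04
y_min, y_max, y_step = -10.08, 10.08, 0.04
downscaling_factor = 2                       # from NetworkParameters further down

Xn = int((x_max - x_min) / x_step)           # 504 pillar cells along x
Yn = int((y_max - y_min) / y_step)           # 504 pillar cells along y
target_grid = (Xn // downscaling_factor, Yn // downscaling_factor)
print(Xn, Yn, target_grid)                   # 504 504 (252, 252) cells in the prediction head
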
@@ -0,0 +1,159 @@ +import numpy as np + + +class GridParameters: + x_min = -10.08 + x_max = 10.08 + x_step = 0.04 + + y_min = -10.08 #-5 + y_max = 10.08 #7.5 + y_step = 0.04 + + # z_min = -1.0 + # z_max = 3.0 + z_min = -1.0 + z_max = 6.0 + + # derived parameters + Xn_f = float(x_max - x_min) / x_step + Yn_f = float(y_max - y_min) / y_step + Xn = int(Xn_f) + Yn = int(Yn_f) + + def __init__(self, **kwargs): + super(GridParameters, self).__init__(**kwargs) + + +class DataParameters: + + # classes_map = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + # for Car and Pedestrian + # map_classes = { + # 0: "Car", + # 1: "Pedestrian" + # } + + # classes_map = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # # "Cyclist": 2, + # # "Truck": 3, + # # "Van": 3, + # # "Tram": 3, + # # "Misc": 3, + # } + + + # for Car only + map_classes = { + 0: "pedestrian" + } + + classes_map = {"pedestrian": 0 + } + + # # for Pedestrian only + # map_classes = { + # 0: "Pedestrian" + # } + + # classes_map = { + # "Pedestrian": 0, + # "Person_sitting": 0, + # } + + nb_classes = len(np.unique(list(classes_map.values()))) + assert nb_classes == np.max(np.unique(list(classes_map.values()))) + 1, 'Starting class indexing at zero.' + + # classes = {"Car": 0, + # "Pedestrian": 1, + # "Person_sitting": 1, + # "Cyclist": 2, + # "Truck": 3, + # "Van": 3, + # "Tram": 3, + # "Misc": 3, + # } + + # nb_classes = len(np.unique(list(classes.values()))) + # assert nb_classes == np.max(np.unique(list(classes.values()))) + 1, 'Starting class indexing at zero.' + + def __init__(self, **kwargs): + super(DataParameters, self).__init__(**kwargs) + + +class NetworkParameters: + + max_points_per_pillar = 100 + max_pillars = 12000 + nb_features = 9 + nb_channels = 64 + downscaling_factor = 2 + + # length (x), width (y), height (z), z-center, orientation + # for car and pedestrian + # anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], + # [3.9, 1.6, 1.56, -1, np.pi/2], + # [0.8, 0.6, 1.73, -0.6, 0], + # [0.8, 0.6, 1.73, -0.6, np.pi/2], + # ], dtype=np.float32).tolist() + + # for car only + # anchor_dims = np.array([[3.9, 1.6, 1.56, -1, 0], + # [3.9, 1.6, 1.56, -1, np.pi/2]], dtype=np.float32).tolist() + + # for pedestrian only + anchor_dims = np.array([ + [0.62, 0.56, 0.7, 1.8, 0], + [0.62, 0.56, 0.7, 1.8, np.pi/2], + [0.62, 0.56, 1.5, 1.63646424, 0], + [0.62, 0.56, 1.5, 1.63646424, np.pi/2], + ], dtype=np.float32).tolist() + nb_dims = 3 + + # for car + # positive_iou_threshold = 0.6 + # negative_iou_threshold = 0.3 + + # for pedestrian + positive_iou_threshold = 0.5 + negative_iou_threshold = 0.35 + + # batch_size = 1 + num_gpus = 1 + batch_size = 4 + total_training_epochs = 160 + # iters_to_decay = 101040. # 15 * 4 * ceil(6733. / 4) --> every 15 epochs on 6733 kitti samples, cf. 
pillar paper + iters_to_decay = 100500 + learning_rate = 2e-4 + decay_rate = 1e-8 + L1 = 0 + L2 = 0 + alpha = 0.25 + gamma = 2.0 + # original pillars paper values + focal_weight = 1.0 # 1.0 + loc_weight = 2.0 # 2.0 + size_weight = 2.0 # 2.0 + angle_weight = 2.0 # 2.0 + heading_weight = 0.2 # 0.2 + class_weight = 0.5 # 0.2 + + def __init__(self, **kwargs): + super(NetworkParameters, self).__init__(**kwargs) + + +class Parameters(GridParameters, DataParameters, NetworkParameters): + + def __init__(self, **kwargs): + super(Parameters, self).__init__(**kwargs) diff --git a/config_v2_2.py b/config_v2_2.py index 5ad41a0..5143f05 100644 --- a/config_v2_2.py +++ b/config_v2_2.py @@ -127,7 +127,8 @@ class NetworkParameters: # negative_iou_threshold = 0.35 # batch_size = 1 - batch_size = 4 + num_gpus = 1 + batch_size = 4 total_training_epochs = 160 # iters_to_decay = 101040. # 15 * 4 * ceil(6733. / 4) --> every 15 epochs on 6733 kitti samples, cf. pillar paper iters_to_decay = 100500 diff --git a/inference_utils_mtr_v1.py b/inference_utils_mtr_v1.py new file mode 100644 index 0000000..fa91bd6 --- /dev/null +++ b/inference_utils_mtr_v1.py @@ -0,0 +1,190 @@ +import numpy as np +import cv2 as cv +from typing import List +from config_mtr_v1 import Parameters +from mtr_processors_v1 import DataProcessor + + +class BBox(tuple): + """ bounding box tuple that can easily be accessed while being compatible to cv2 rotational rects """ + + def __new__(cls, bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, bb_yaw, bb_heading, bb_cls, bb_conf): + bbx_tuple = ((float(bb_x), float(bb_y)), (float(bb_length), float(bb_width)), float(np.rad2deg(bb_yaw))) + return super(BBox, cls).__new__(cls, tuple(bbx_tuple)) + + def __init__(self, bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, bb_yaw, bb_heading, bb_cls, bb_conf): + self.x = bb_x + self.y = bb_y + self.z = bb_z + self.length = bb_length + self.width = bb_width + self.height = bb_height + self.yaw = bb_yaw + self.heading = bb_heading + self.cls = bb_cls + self.conf = bb_conf + + def __str__(self): + return "BB | Cls: %s, x: %f, y: %f, l: %f, w: %f, yaw: %f" % ( + self.cls, self.x, self.y, self.length, self.width, self.yaw) + + +def rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5): + """ rotational NMS + set_boxes = size NSeqs list of size NDet lists of tuples. each tuple has the form ((pos, pos), (size, size), angle) + confidences = size NSeqs list of lists containing NDet floats, i.e. one per detection + """ + assert len(set_boxes) == len(confidences) and 0 < occ_threshold < 1 and 0 < nms_iou_thr < 1 + if not len(set_boxes): + return [] + assert (isinstance(set_boxes[0][0][0][0], float) or isinstance(set_boxes[0][0][0][0], int)) and \ + (isinstance(confidences[0][0], float) or isinstance(confidences[0][0], int)) + nms_boxes = [] + for boxes, confs in zip(set_boxes, confidences): + assert len(boxes) == len(confs) + indices = cv.dnn.NMSBoxesRotated(boxes, confs, occ_threshold, nms_iou_thr) + # print(indices) + indices = indices.reshape(len(indices)).tolist() + nms_boxes.append([boxes[i] for i in indices]) + return nms_boxes + + +def generate_bboxes_from_pred(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_threshold=0.5): + """ Generating the bounding boxes based on the regression targets """ + + # Get only the boxes where occupancy is greater or equal threshold. 
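+    # Decoding summary: the x/y deltas are scaled by the matched anchor's BEV diagonal and added to
+    # the cell's metric coordinates, the z delta is scaled by the anchor height and added to the
+    # anchor's z-centre, the sizes are recovered with exp(), and the yaw delta is added back onto the
+    # anchor yaw; the heading comes from rounding hdg and the class from argmax(clf).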
+ real_boxes = np.where(occ >= occ_threshold) + # Get the indices of the occupancy array + coordinates = list(zip(real_boxes[0], real_boxes[1], real_boxes[2])) + # Assign anchor dimensions as original bounding box coordinates which will eventually be changed + # according to the predicted regression targets + anchor_dims = anchor_dims + real_anchors = np.random.rand(len(coordinates), len(anchor_dims[0])) + + for i, value in enumerate(real_boxes[2]): + real_anchors[i, ...] = anchor_dims[value] + + # Change the anchor boxes based on regression targets, this is the inverse of the operations given in + # createPillarTargets function (src/PointPillars.cpp) + predicted_boxes = [] + for i, value in enumerate(coordinates): + real_diag = np.sqrt(np.square(real_anchors[i][0]) + np.square(real_anchors[i][1])) + real_x = value[0] * Parameters.x_step * Parameters.downscaling_factor + Parameters.x_min + real_y = value[1] * Parameters.y_step * Parameters.downscaling_factor + Parameters.y_min + bb_x = pos[value][0] * real_diag + real_x + bb_y = pos[value][1] * real_diag + real_y + bb_z = pos[value][2] * real_anchors[i][2] + real_anchors[i][3] + # print(position[value], real_x, real_y, real_diag) + bb_length = np.exp(siz[value][0]) * real_anchors[i][0] + bb_width = np.exp(siz[value][1]) * real_anchors[i][1] + bb_height = np.exp(siz[value][2]) * real_anchors[i][2] + bb_yaw = ang[value] + real_anchors[i][4] + # bb_yaw = -np.arcsin(np.clip(ang[value], -1, 1)) + real_anchors[i][4] + bb_heading = np.round(hdg[value]) + bb_cls = np.argmax(clf[value]) + bb_conf = occ[value] + predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf)) + + + return predicted_boxes + + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +def inverse_yaw_element(bb_yaw): + bb_yaw -= np.pi / 2 + while bb_yaw > np.pi: + # print("larger than pi") + bb_yaw -= (np.pi * 2) + while bb_yaw < -np.pi: + # print("smaller than -pi") + bb_yaw += (np.pi * 2) + + return bb_yaw + + # if bb_yaw > np.pi /2: + # bb_yaw -= 2 * np.pi + + # bb_yaw += np.pi/2 + # return bb_yaw + +def generate_bboxes_from_pred_and_np_array(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_threshold=0.5): + """ Generating the bounding boxes based on the regression targets """ + + # Get only the boxes where occupancy is greater or equal threshold. + real_boxes = np.where(occ >= occ_threshold) + # print(occ.shape) + # Get the indices of the occupancy array + coordinates = list(zip(real_boxes[0], real_boxes[1], real_boxes[2])) + # Assign anchor dimensions as original bounding box coordinates which will eventually be changed + # according to the predicted regression targets + anchor_dims = anchor_dims + real_anchors = np.random.rand(len(coordinates), len(anchor_dims[0])) + + for i, value in enumerate(real_boxes[2]): + real_anchors[i, ...] 
= anchor_dims[value] + + # Change the anchor boxes based on regression targets, this is the inverse of the operations given in + # createPillarTargets function (src/PointPillars.cpp) + predicted_boxes = [] + predicted_boxes_list = [] + for i, value in enumerate(coordinates): + # print("coordinate ", i) + real_diag = np.sqrt(np.square(real_anchors[i][0]) + np.square(real_anchors[i][1])) + real_x = value[0] * Parameters.x_step * Parameters.downscaling_factor + Parameters.x_min + real_y = value[1] * Parameters.y_step * Parameters.downscaling_factor + Parameters.y_min + # print("i: ", i, "\tx: ", real_x, "\ty:", real_y) + # print("i: ", i, "\tx: ", value[0], "\ty:", value[1]) + bb_x = pos[value][0] * real_diag + real_x + bb_y = pos[value][1] * real_diag + real_y + bb_z = pos[value][2] * real_anchors[i][2] + real_anchors[i][3] + # print(position[value], real_x, real_y, real_diag) + bb_length = np.exp(siz[value][0]) * real_anchors[i][0] + bb_width = np.exp(siz[value][1]) * real_anchors[i][1] + bb_height = np.exp(siz[value][2]) * real_anchors[i][2] + bb_heading = np.round(hdg[value]) + bb_yaw = ang[value] + real_anchors[i][4] + # if np.int32(bb_heading) == 0: + # bb_yaw -= np.pi + + bb_cls = np.argmax(clf[value]) + bb_conf = occ[value] + predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf)) + predicted_boxes_list.append([bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf]) + + return predicted_boxes, np.array(predicted_boxes_list) + +def convert_boxes_to_list(set_boxes): + # (B, N) + batch_predicted_boxes_list = [] + for batch_idx in range(len(set_boxes)): + predicted_boxes_list = [] + + for box in set_boxes[batch_idx]: + + predicted_boxes_list.append([box.x, box.y, box.z, box.length, box.width, box.height, + box.yaw, box.heading, box.cls, box.conf]) + + batch_predicted_boxes_list.append(predicted_boxes_list) + return batch_predicted_boxes_list + + +def focal_loss_checker(y_true, y_pred, n_occs=-1): + y_true = np.stack(np.where(y_true == 1)) + if n_occs == -1: + n_occs = y_true.shape[1] + occ_thr = np.sort(y_pred.flatten())[-n_occs] + y_pred = np.stack(np.where(y_pred >= occ_thr)) + p = 0 + for gt in range(y_true.shape[1]): + for pr in range(y_pred.shape[1]): + if np.all(y_true[:, gt] == y_pred[:, pr]): + p += 1 + break + print("#matched gt: ", p, " #unmatched gt: ", y_true.shape[1] - p, " #unmatched pred: ", y_pred.shape[1] - p, + " occupancy threshold: ", occ_thr) diff --git a/inference_utils_v2.py b/inference_utils_v2.py index 3d5eb4a..a34441a 100644 --- a/inference_utils_v2.py +++ b/inference_utils_v2.py @@ -2,7 +2,6 @@ import cv2 as cv from typing import List from config_v2 import Parameters -from readers import DataReader from point_pillars_custom_processors_v2 import DataProcessor @@ -174,35 +173,6 @@ def convert_boxes_to_list(set_boxes): batch_predicted_boxes_list.append(predicted_boxes_list) return batch_predicted_boxes_list -class GroundTruthGenerator(DataProcessor): - """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ - - def __init__(self, data_reader: DataReader, label_files: List[str], calibration_files: List[str] = None, - network_format: bool = False): - super(GroundTruthGenerator, self).__init__() - self.data_reader = data_reader - self.label_files = label_files - self.calibration_files = calibration_files - self.network_format = network_format - - def __len__(self): - return len(self.label_files) - - def __getitem__(self, 
file_id: int): - label = self.data_reader.read_label(self.label_files[file_id]) - R, t = self.data_reader.read_calibration(self.calibration_files[file_id]) - label_transformed = self.transform_labels_into_lidar_coordinates(label, R, t) - if self.network_format: - occupancy, position, size, angle, heading, classification = self.make_ground_truth(label_transformed) - occupancy = np.array(occupancy) - position = np.array(position) - size = np.array(size) - angle = np.array(angle) - heading = np.array(heading) - classification = np.array(classification) - return [occupancy, position, size, angle, heading, classification] - return label_transformed - def focal_loss_checker(y_true, y_pred, n_occs=-1): y_true = np.stack(np.where(y_true == 1)) diff --git a/inference_utils_v2_2.py b/inference_utils_v2_2.py new file mode 100644 index 0000000..e024624 --- /dev/null +++ b/inference_utils_v2_2.py @@ -0,0 +1,190 @@ +import numpy as np +import cv2 as cv +from typing import List +from config_v2_2 import Parameters +from point_pillars_custom_processors_v2_2 import DataProcessor + + +class BBox(tuple): + """ bounding box tuple that can easily be accessed while being compatible to cv2 rotational rects """ + + def __new__(cls, bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, bb_yaw, bb_heading, bb_cls, bb_conf): + bbx_tuple = ((float(bb_x), float(bb_y)), (float(bb_length), float(bb_width)), float(np.rad2deg(bb_yaw))) + return super(BBox, cls).__new__(cls, tuple(bbx_tuple)) + + def __init__(self, bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, bb_yaw, bb_heading, bb_cls, bb_conf): + self.x = bb_x + self.y = bb_y + self.z = bb_z + self.length = bb_length + self.width = bb_width + self.height = bb_height + self.yaw = bb_yaw + self.heading = bb_heading + self.cls = bb_cls + self.conf = bb_conf + + def __str__(self): + return "BB | Cls: %s, x: %f, y: %f, l: %f, w: %f, yaw: %f" % ( + self.cls, self.x, self.y, self.length, self.width, self.yaw) + + +def rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5): + """ rotational NMS + set_boxes = size NSeqs list of size NDet lists of tuples. each tuple has the form ((pos, pos), (size, size), angle) + confidences = size NSeqs list of lists containing NDet floats, i.e. one per detection + """ + assert len(set_boxes) == len(confidences) and 0 < occ_threshold < 1 and 0 < nms_iou_thr < 1 + if not len(set_boxes): + return [] + assert (isinstance(set_boxes[0][0][0][0], float) or isinstance(set_boxes[0][0][0][0], int)) and \ + (isinstance(confidences[0][0], float) or isinstance(confidences[0][0], int)) + nms_boxes = [] + for boxes, confs in zip(set_boxes, confidences): + assert len(boxes) == len(confs) + indices = cv.dnn.NMSBoxesRotated(boxes, confs, occ_threshold, nms_iou_thr) + print(indices) + indices = indices.reshape(len(indices)).tolist() + nms_boxes.append([boxes[i] for i in indices]) + return nms_boxes + + +def generate_bboxes_from_pred(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_threshold=0.5): + """ Generating the bounding boxes based on the regression targets """ + + # Get only the boxes where occupancy is greater or equal threshold. 
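+    # Note for the decoding further down: value[0]/value[1] are feature-map cell indices, and
+    # real_x/real_y map them back to metric coordinates via index * step * downscaling_factor + min.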
+ real_boxes = np.where(occ >= occ_threshold) + # Get the indices of the occupancy array + coordinates = list(zip(real_boxes[0], real_boxes[1], real_boxes[2])) + # Assign anchor dimensions as original bounding box coordinates which will eventually be changed + # according to the predicted regression targets + anchor_dims = anchor_dims + real_anchors = np.random.rand(len(coordinates), len(anchor_dims[0])) + + for i, value in enumerate(real_boxes[2]): + real_anchors[i, ...] = anchor_dims[value] + + # Change the anchor boxes based on regression targets, this is the inverse of the operations given in + # createPillarTargets function (src/PointPillars.cpp) + predicted_boxes = [] + for i, value in enumerate(coordinates): + real_diag = np.sqrt(np.square(real_anchors[i][0]) + np.square(real_anchors[i][1])) + real_x = value[0] * Parameters.x_step * Parameters.downscaling_factor + Parameters.x_min + real_y = value[1] * Parameters.y_step * Parameters.downscaling_factor + Parameters.y_min + bb_x = pos[value][0] * real_diag + real_x + bb_y = pos[value][1] * real_diag + real_y + bb_z = pos[value][2] * real_anchors[i][2] + real_anchors[i][3] + # print(position[value], real_x, real_y, real_diag) + bb_length = np.exp(siz[value][0]) * real_anchors[i][0] + bb_width = np.exp(siz[value][1]) * real_anchors[i][1] + bb_height = np.exp(siz[value][2]) * real_anchors[i][2] + bb_yaw = ang[value] + real_anchors[i][4] + # bb_yaw = -np.arcsin(np.clip(ang[value], -1, 1)) + real_anchors[i][4] + bb_heading = np.round(hdg[value]) + bb_cls = np.argmax(clf[value]) + bb_conf = occ[value] + predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf)) + + + return predicted_boxes + + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +def inverse_yaw_element(bb_yaw): + bb_yaw -= np.pi / 2 + while bb_yaw > np.pi: + # print("larger than pi") + bb_yaw -= (np.pi * 2) + while bb_yaw < -np.pi: + # print("smaller than -pi") + bb_yaw += (np.pi * 2) + + return bb_yaw + + # if bb_yaw > np.pi /2: + # bb_yaw -= 2 * np.pi + + # bb_yaw += np.pi/2 + # return bb_yaw + +def generate_bboxes_from_pred_and_np_array(occ, pos, siz, ang, hdg, clf, anchor_dims, occ_threshold=0.5): + """ Generating the bounding boxes based on the regression targets """ + + # Get only the boxes where occupancy is greater or equal threshold. + real_boxes = np.where(occ >= occ_threshold) + # print(occ.shape) + # Get the indices of the occupancy array + coordinates = list(zip(real_boxes[0], real_boxes[1], real_boxes[2])) + # Assign anchor dimensions as original bounding box coordinates which will eventually be changed + # according to the predicted regression targets + anchor_dims = anchor_dims + real_anchors = np.random.rand(len(coordinates), len(anchor_dims[0])) + + for i, value in enumerate(real_boxes[2]): + real_anchors[i, ...] 
= anchor_dims[value] + + # Change the anchor boxes based on regression targets, this is the inverse of the operations given in + # createPillarTargets function (src/PointPillars.cpp) + predicted_boxes = [] + predicted_boxes_list = [] + for i, value in enumerate(coordinates): + # print("coordinate ", i) + real_diag = np.sqrt(np.square(real_anchors[i][0]) + np.square(real_anchors[i][1])) + real_x = value[0] * Parameters.x_step * Parameters.downscaling_factor + Parameters.x_min + real_y = value[1] * Parameters.y_step * Parameters.downscaling_factor + Parameters.y_min + # print("i: ", i, "\tx: ", real_x, "\ty:", real_y) + # print("i: ", i, "\tx: ", value[0], "\ty:", value[1]) + bb_x = pos[value][0] * real_diag + real_x + bb_y = pos[value][1] * real_diag + real_y + bb_z = pos[value][2] * real_anchors[i][2] + real_anchors[i][3] + # print(position[value], real_x, real_y, real_diag) + bb_length = np.exp(siz[value][0]) * real_anchors[i][0] + bb_width = np.exp(siz[value][1]) * real_anchors[i][1] + bb_height = np.exp(siz[value][2]) * real_anchors[i][2] + bb_heading = np.round(hdg[value]) + bb_yaw = ang[value] + real_anchors[i][4] + # if np.int32(bb_heading) == 0: + # bb_yaw -= np.pi + + bb_cls = np.argmax(clf[value]) + bb_conf = occ[value] + predicted_boxes.append(BBox(bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf)) + predicted_boxes_list.append([bb_x, bb_y, bb_z, bb_length, bb_width, bb_height, + bb_yaw, bb_heading, bb_cls, bb_conf]) + + return predicted_boxes, np.array(predicted_boxes_list) + +def convert_boxes_to_list(set_boxes): + # (B, N) + batch_predicted_boxes_list = [] + for batch_idx in range(len(set_boxes)): + predicted_boxes_list = [] + + for box in set_boxes[batch_idx]: + + predicted_boxes_list.append([box.x, box.y, box.z, box.length, box.width, box.height, + box.yaw, box.heading, box.cls, box.conf]) + + batch_predicted_boxes_list.append(predicted_boxes_list) + return batch_predicted_boxes_list + + +def focal_loss_checker(y_true, y_pred, n_occs=-1): + y_true = np.stack(np.where(y_true == 1)) + if n_occs == -1: + n_occs = y_true.shape[1] + occ_thr = np.sort(y_pred.flatten())[-n_occs] + y_pred = np.stack(np.where(y_pred >= occ_thr)) + p = 0 + for gt in range(y_true.shape[1]): + for pr in range(y_pred.shape[1]): + if np.all(y_true[:, gt] == y_pred[:, pr]): + p += 1 + break + print("#matched gt: ", p, " #unmatched gt: ", y_true.shape[1] - p, " #unmatched pred: ", y_pred.shape[1] - p, + " occupancy threshold: ", occ_thr) diff --git a/mtr_point_pillars_evaluation_v1.py b/mtr_point_pillars_evaluation_v1.py new file mode 100644 index 0000000..2c0acd5 --- /dev/null +++ b/mtr_point_pillars_evaluation_v1.py @@ -0,0 +1,164 @@ + + +import os +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "1" + +from glob import glob +import numpy as np +import tensorflow as tf +from mtr_processors_v1 import CustomDataGenerator, AnalyseCustomDataGenerator +from inference_utils_mtr_v1 import generate_bboxes_from_pred +from inference_utils_mtr_v1 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array, convert_boxes_to_list +# from readers import KittiDataReader +from config_mtr_v1 import Parameters +from network_v2_2 import build_point_pillar_graph +from datetime import datetime + +from det3d.kitti_dataset.utils.evaluation import save_kitti_format, save_kitti_format_for_evaluation + +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/Project4-MTR" # TODO make main arg +MODEL_ROOT 
= "./logs_Pedestrian_MTR_No_Early_Stopping_wo_Aug_with_val" +PC_STATISTICS_PATH = "/home/tan/tjtanaa/det3d/det3d/mtr_dataset/point_cloud_statistics" + +occ_threshold = 0.5 +nms_iou_thr=0.5 +# KITTI_EVALUATION_OUTPUT = os.path.join(MODEL_ROOT, "Evaluation") +# if not os.path.exists(KITTI_EVALUATION_OUTPUT): +# os.makedirs(KITTI_EVALUATION_OUTPUT) + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +if __name__ == "__main__": + + params = Parameters() + pillar_net = build_point_pillar_graph(params) + pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + pillar_net.summary() + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + save_viz_path = os.path.join("/home/tan/tjtanaa/PointPillars/visualization", MODEL_ROOT.split('/')[-1]) + # Initialize and setup output directory. + Converter = PointvizConverter(save_viz_path) + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + + validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + point_cloud_statistics_path=PC_STATISTICS_PATH, + random_select=False, + npoints=8000, split='test', classes=list(params.classes_map.keys())) + inference_duration = [] + sample_index = 0 # has to be controlled manually to ensure that the sequence number is continuous + + for batch_idx in range(0,len(validation_gen)): + [pillars, voxels], [occupancy_, position_, size_, angle_, heading_], [pts_input, gt_boxes3d] = validation_gen[batch_idx] + + start=datetime.now() + + occupancy, position, size, angle, heading = pillar_net.predict([pillars, voxels]) + + inference_duration.append( datetime.now()-start) + + classification = np.zeros(shape=np.array(occupancy).shape) + classification_ = classification + + set_boxes, confidences = [], [] + loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 + print("batch_idx ", batch_idx, " has ", loop_range, "batch sample", " with occupancy sum: ", np.sum(occupancy)) + for i in range(loop_range): + set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + heading[i], + classification[i], params.anchor_dims, occ_threshold=occ_threshold) + + + _, decoded_gt_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy_[i], position_[i], size_[i], angle_[i], + heading_[i], + classification_[i], params.anchor_dims, occ_threshold=occ_threshold) + + # gt_boxes3d_ = gt_boxes3d[i] + gt_boxes3d_ = decoded_gt_boxes3d + + # print(gt_boxes3d_.shape) + if(len(gt_boxes3d_) == 0): + gt_bbox_params_list = [] + else: + gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], + gt_boxes3d_[:,1], gt_boxes3d_[:,2] , + gt_boxes3d_[:,0], + gt_boxes3d_[:,6]], axis=1) + + + gt_bbox_params_list = gt_bbox_params.tolist() + # gt_bbox_params_list = [] + for k in range(len(gt_bbox_params_list)): + msg = "%.5f, %s, %.5f"%(decoded_gt_boxes3d[k,9], params.map_classes[int(decoded_gt_boxes3d[k,8])], decoded_gt_boxes3d[k,6]) + # msg = "%.5f, %.5f"%(gt_bbox_params_list[k][3],gt_bbox_params_list[k][5]) + gt_bbox_params_list[k].append("Green") + # gt_bbox_params_list[k].append("1.0") + gt_bbox_params_list[k].append(msg) + + if len(set_box) > 0: + + + # NMS + # set_box + # print("start nms") + confidence = [float(box.conf) for box in set_box] + nms_boxes = rotational_nms([set_box], [confidence], occ_threshold=occ_threshold, nms_iou_thr=nms_iou_thr) + + predicted_boxes3d_list = convert_boxes_to_list(nms_boxes) + + 
predicted_boxes3d = np.array(predicted_boxes3d_list[0]) + predicted_boxes3d_ = predicted_boxes3d + + print("sample_: ", batch_idx * params.batch_size + i, " has ", predicted_boxes3d_.shape, "predictions") + + bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], + predicted_boxes3d_[:,1], predicted_boxes3d_[:,2] , + predicted_boxes3d_[:,0], + predicted_boxes3d_[:,6]], axis=1) + # print("z ", predicted_boxes3d[:,2]) + + bbox_params_list = bbox_params.tolist() + # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + for k in range(predicted_boxes3d.shape[0]): + msg = "%.5f, %s, %.5f"%(predicted_boxes3d[k,9],params.map_classes[int(predicted_boxes3d[k,8])], predicted_boxes3d[k,6]) + bbox_params_list[k].append("Magenta") + bbox_params_list[k].append(msg) + # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + "=" + params.map_classes[int(predicted_boxes3d[k,8])]) + gt_bbox_params_list.append(bbox_params_list[k]) + + + # # save as kitti format for evaluation + # cur_sample_id = batch_idx * params.batch_size + i + # sample_file_name = validation_gen.sample_id_list[cur_sample_id] + # calib = sample[i]['calib'] + # # cur_boxes3d = cur_boxes3d.cpu().numpy() + + # cur_boxes3d_xyz = calib.lidar_to_rect(predicted_boxes3d[:, 0:3]) + + # cur_boxes3d = np.concatenate(( + # cur_boxes3d_xyz[:,0,np.newaxis], # 0 x + # cur_boxes3d_xyz[:,1,np.newaxis] + predicted_boxes3d[:,5,np.newaxis] / 2, # 1 y + # cur_boxes3d_xyz[:,2,np.newaxis], # 2 z + # predicted_boxes3d[:,5,np.newaxis], # 3 l # same as the original label + # predicted_boxes3d[:,4,np.newaxis], # 4 w # same as the original label + # predicted_boxes3d[:,3,np.newaxis], # 5 h # same as the original label + # -predicted_boxes3d[:,6,np.newaxis], # 6 ry + # ), axis=1) + # cur_scores_raw = predicted_boxes3d[:,-1] + # image_shape = validation_gen.get_image_shape(sample_file_name) + # labels_obj = validation_gen.get_label(sample_file_name) + # classes = ['Car' for i in range(len(predicted_boxes3d))] + # save_kitti_format_for_evaluation(sample_index, calib, cur_boxes3d, KITTI_EVALUATION_OUTPUT, cur_scores_raw, image_shape, classes, labels_obj) + sample_index += 1 + + coor = pts_input[i][:,[1,2,0]] + Converter.compile("evaluation_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + bbox_params=gt_bbox_params_list) + # print("Average runtime speed: ", np.mean(inference_duration[20:])) + diff --git a/mtr_point_pillars_prediction_v1.py b/mtr_point_pillars_prediction_v1.py new file mode 100644 index 0000000..70623d6 --- /dev/null +++ b/mtr_point_pillars_prediction_v1.py @@ -0,0 +1,138 @@ + + +import os +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "1" + +from glob import glob +import numpy as np +import tensorflow as tf +from mtr_processors_v1 import CustomDataGenerator, AnalyseCustomDataGenerator +from inference_utils_mtr_v1 import generate_bboxes_from_pred +from inference_utils_mtr_v1 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array, convert_boxes_to_list +# from readers import KittiDataReader +from config_mtr_v1 import Parameters +from network_v2_2 import build_point_pillar_graph +from datetime import datetime + +from det3d.kitti_dataset.utils.evaluation import save_kitti_format, save_kitti_format_for_evaluation + +from point_viz.converter import PointvizConverter + +DATA_ROOT = "/media/data3/tjtanaa/Project4-MTR" # TODO make main arg +MODEL_ROOT = 
"./logs_Pedestrian_MTR_No_Early_Stopping_wo_Aug_with_val" +PC_STATISTICS_PATH = "/home/tan/tjtanaa/det3d/det3d/mtr_dataset/point_cloud_statistics" + +occ_threshold = 0.3 +nms_iou_thr=0.1 +# KITTI_EVALUATION_OUTPUT = os.path.join(MODEL_ROOT, "Evaluation") +# if not os.path.exists(KITTI_EVALUATION_OUTPUT): +# os.makedirs(KITTI_EVALUATION_OUTPUT) + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + +if __name__ == "__main__": + + params = Parameters() + pillar_net = build_point_pillar_graph(params) + pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + pillar_net.summary() + + # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" + save_viz_path = os.path.join("/home/tan/tjtanaa/PointPillars/visualization", MODEL_ROOT.split('/')[-1]) + # Initialize and setup output directory. + Converter = PointvizConverter(save_viz_path) + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + + real_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + point_cloud_statistics_path=PC_STATISTICS_PATH, + random_select=False, + npoints=8000, split='real', classes=list(params.classes_map.keys())) + inference_duration = [] + sample_index = 0 # has to be controlled manually to ensure that the sequence number is continuous + print("total number of batch: ", len(real_gen)) + for batch_idx in range(0, len(real_gen)): + [pillars, voxels], [pts_input] = real_gen[batch_idx] + + start=datetime.now() + + occupancy, position, size, angle, heading = pillar_net.predict([pillars, voxels]) + + inference_duration.append( datetime.now()-start) + + classification = np.zeros(shape=np.array(occupancy).shape) + classification_ = classification + + set_boxes, confidences = [], [] + loop_range = occupancy.shape[0] if len(occupancy.shape) == 4 else 1 + print("batch_idx ", batch_idx, " has ", loop_range, "batch sample", " with occupancy sum: ", np.sum(occupancy)) + for i in range(loop_range): + set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], + heading[i], + classification[i], params.anchor_dims, occ_threshold=occ_threshold) + + gt_bbox_params_list = [] + if len(set_box) > 0: + + + # NMS + # set_box + # print("start nms") + confidence = [float(box.conf) for box in set_box] + nms_boxes = rotational_nms([set_box], [confidence], occ_threshold=occ_threshold, nms_iou_thr=nms_iou_thr) + + predicted_boxes3d_list = convert_boxes_to_list(nms_boxes) + + predicted_boxes3d = np.array(predicted_boxes3d_list[0]) + predicted_boxes3d_ = predicted_boxes3d + + print("sample_: ", batch_idx * params.batch_size + i, " has ", predicted_boxes3d_.shape, "predictions") + + bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], + predicted_boxes3d_[:,1], predicted_boxes3d_[:,2] , + predicted_boxes3d_[:,0], + predicted_boxes3d_[:,6]], axis=1) + # print("z ", predicted_boxes3d[:,2]) + + bbox_params_list = bbox_params.tolist() + # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] + for k in range(predicted_boxes3d.shape[0]): + msg = "%.5f, %s, %.5f"%(predicted_boxes3d[k,9],params.map_classes[int(predicted_boxes3d[k,8])], predicted_boxes3d[k,6]) + bbox_params_list[k].append("Magenta") + bbox_params_list[k].append(msg) + # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + "=" + params.map_classes[int(predicted_boxes3d[k,8])]) + 
gt_bbox_params_list.append(bbox_params_list[k]) + + + # # save as kitti format for evaluation + # cur_sample_id = batch_idx * params.batch_size + i + # sample_file_name = validation_gen.sample_id_list[cur_sample_id] + # calib = sample[i]['calib'] + # # cur_boxes3d = cur_boxes3d.cpu().numpy() + + # cur_boxes3d_xyz = calib.lidar_to_rect(predicted_boxes3d[:, 0:3]) + + # cur_boxes3d = np.concatenate(( + # cur_boxes3d_xyz[:,0,np.newaxis], # 0 x + # cur_boxes3d_xyz[:,1,np.newaxis] + predicted_boxes3d[:,5,np.newaxis] / 2, # 1 y + # cur_boxes3d_xyz[:,2,np.newaxis], # 2 z + # predicted_boxes3d[:,5,np.newaxis], # 3 l # same as the original label + # predicted_boxes3d[:,4,np.newaxis], # 4 w # same as the original label + # predicted_boxes3d[:,3,np.newaxis], # 5 h # same as the original label + # -predicted_boxes3d[:,6,np.newaxis], # 6 ry + # ), axis=1) + # cur_scores_raw = predicted_boxes3d[:,-1] + # image_shape = validation_gen.get_image_shape(sample_file_name) + # labels_obj = validation_gen.get_label(sample_file_name) + # classes = ['Car' for i in range(len(predicted_boxes3d))] + # save_kitti_format_for_evaluation(sample_index, calib, cur_boxes3d, KITTI_EVALUATION_OUTPUT, cur_scores_raw, image_shape, classes, labels_obj) + # sample_index += 1 + + coor = pts_input[i][:,[1,2,0]] + Converter.compile("real_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], + bbox_params=gt_bbox_params_list) + # print("Average runtime speed: ", np.mean(inference_duration[20:])) + diff --git a/point_pillars_training_custom_run_v2.py b/mtr_point_pilllars_training_run_v1.py similarity index 59% rename from point_pillars_training_custom_run_v2.py rename to mtr_point_pilllars_training_run_v1.py index 1293f32..947ec4b 100644 --- a/point_pillars_training_custom_run_v2.py +++ b/mtr_point_pilllars_training_run_v1.py @@ -1,53 +1,64 @@ + import os +os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"] = "2" import time import numpy as np import tensorflow as tf from glob import glob # from config import Parameters -from config_v2 import Parameters -from loss import PointPillarNetworkLoss -from network import build_point_pillar_graph -# from processors import SimpleDataGenerator -# from custom_processors import CustomDataGenerator -from point_pillars_custom_processors_v2 import CustomDataGenerator -from readers import KittiDataReader +from config_mtr_v1 import Parameters +from loss_v2_2 import PointPillarNetworkLoss +from network_v2_2 import build_point_pillar_graph +from mtr_processors_v1 import CustomDataGenerator +# from readers import KittiDataReader + +from det3d.mtr_dataset import MTRDatasetBase # from point_viz.converter import PointvizConverter tf.get_logger().setLevel("ERROR") -# DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" # TODO make main arg -DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" # TODO make main arg -# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" -# MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" -MODEL_ROOT = "./logs_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" -os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = "1" -if __name__ == "__main__": +# DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" # TODO make main arg +DATA_ROOT = "/media/data3/tjtanaa/Project4-MTR" # TODO make main arg +MODEL_ROOT = "./logs_Pedestrian_MTR_No_Early_Stopping_wo_Aug_with_val" +PC_STATISTICS_PATH = 
"/home/tan/tjtanaa/det3d/det3d/mtr_dataset/point_cloud_statistics" +# from tensorflow.python.client import device_lib +# print(device_lib.list_local_devices()) +# exit() +if __name__ == "__main__": params = Parameters() + # gpus = tf.config.experimental.list_physical_devices('GPU') + pillar_net = build_point_pillar_graph(params) # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) - + pillar_net.summary() + # exit() loss = PointPillarNetworkLoss(params) optimizer = tf.keras.optimizers.Adam(lr=params.learning_rate, decay=params.decay_rate) pillar_net.compile(optimizer, loss=loss.losses()) + - # gt_database_dir = os.path.join(DATA_ROOT, "gt_database") - gt_database_dir = None + train_dataset = MTRDatasetBase(DATA_ROOT, 'train', PC_STATISTICS_PATH) + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") + # gt_database_dir = None training_gen = CustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, - npoints=20000, split='train_val', classes=list(params.classes_map.keys()), - random_select=True, gt_database_dir=None, aug_hard_ratio=0.7) + point_cloud_statistics_path=PC_STATISTICS_PATH, + npoints=8000, split='train', classes=list(params.classes_map.keys()), + random_select=True, gt_database_dir=gt_database_dir, aug_hard_ratio=0.7) - # validation_gen = CustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, - # npoints=20000, split='val', classes=list(params.classes_map.keys())) + validation_gen = CustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, + point_cloud_statistics_path=PC_STATISTICS_PATH, + npoints=8000, split='test', classes=list(params.classes_map.keys())) # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" @@ -68,7 +79,7 @@ callbacks = [ tf.keras.callbacks.TensorBoard(log_dir=log_dir), tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(log_dir, "model.h5"), - monitor='loss', save_best_only=True), + monitor='val_loss', save_best_only=True), tf.keras.callbacks.LearningRateScheduler( lambda epoch, lr: lr * 0.8 if ((epoch % epoch_to_decay == 0) and (epoch != 0)) else lr, verbose=True), # tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_loss'), @@ -76,13 +87,16 @@ try: pillar_net.fit(training_gen, - # validation_data = validation_gen, + validation_data = validation_gen, steps_per_epoch=len(training_gen), callbacks=callbacks, use_multiprocessing=True, + max_queue_size = 16, epochs=int(params.total_training_epochs), workers=6) except KeyboardInterrupt: model_str = "interrupted_%s.h5" % time.strftime("%Y%m%d-%H%M%S") pillar_net.save(os.path.join(log_dir, model_str)) print("Interrupt. 
Saving output to %s" % os.path.join(os.getcwd(), log_dir[1:], model_str)) + + diff --git a/point_pillars_custom_processors_v2.py b/mtr_processors_v1.py similarity index 62% rename from point_pillars_custom_processors_v2.py rename to mtr_processors_v1.py index ae2b17c..3a83f07 100644 --- a/point_pillars_custom_processors_v2.py +++ b/mtr_processors_v1.py @@ -4,17 +4,21 @@ from tensorflow.python.keras.utils.data_utils import Sequence -from config_v2 import Parameters +from config_mtr_v1 import Parameters # from point_pillars import createPillars, createPillarsTarget from point_pillars_v2 import createPillars, createPillarsTarget # from readers import DataReader, Label3D from sklearn.utils import shuffle import sys +from det3d.mtr_dataset import MTRDatasetBase +from det3d.mtr_dataset.utils import mtr_utils -from det3d.pc_kitti_dataset import PCKittiAugmentedDataset +# from point_viz.converter import PointvizConverter +from datetime import datetime -from point_viz.converter import PointvizConverter +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period def select_best_anchors(arr): dims = np.indices(arr.shape[1:]) @@ -40,7 +44,7 @@ def make_point_pillars(self, points: np.ndarray): assert points.ndim == 2 assert points.shape[1] == 4 assert points.dtype == np.float32 - + # start=datetime.now() pillars, indices = createPillars(points, self.max_points_per_pillar, self.max_pillars, @@ -53,6 +57,7 @@ def make_point_pillars(self, points: np.ndarray): self.z_min, self.z_max, False) + # print("Create pillar takes : ", datetime.now()-start) return pillars, indices @@ -70,9 +75,6 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): # filter labels by classes (cars, pedestrians and Trams) # Label has 4 properties (Classification (0th index of labels file), # centroid coordinates, dimensions, yaw) - # labels = list(filter(lambda x: x.classification in self.classes, labels)) - - if len(gt_boxes_3d) == 0: pX, pY = int(self.Xn / self.downscaling_factor), int(self.Yn / self.downscaling_factor) @@ -82,12 +84,6 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): np.zeros((pX, pY, a), dtype='float32'), np.zeros((pX, pY, a, self.nb_classes), dtype='float64') # For each label file, generate these properties except for the Don't care class - - # target_positions = np.array([label.centroid for label in labels], dtype=np.float32) - # target_dimension = np.array([label.dimension for label in labels], dtype=np.float32) - # target_yaw = np.array([label.yaw for label in labels], dtype=np.float32) - # target_class = np.array([self.classes[label.classification] for label in labels], dtype=np.int32) - target_positions = gt_boxes_3d[:,:3] target_dimension = gt_boxes_3d[:,3:6] # don't have to translate again target_yaw = gt_boxes_3d[:, 6] @@ -101,6 +97,8 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): assert np.all(target_yaw >= -np.pi) & np.all(target_yaw <= np.pi) assert len(target_positions) == len(target_dimension) == len(target_yaw) == len(target_class) + # start=datetime.now() + target, pos, neg = createPillarsTarget(target_positions, target_dimension, target_yaw, @@ -121,39 +119,41 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): self.z_min, self.z_max, False) + + # print("Create target takes : ", datetime.now()-start) + self.pos_cnt += pos self.neg_cnt += neg # return a merged target view for all objects in the ground truth and get categorical labels - # 
print("target.shape: ", target.shape) + sel = select_best_anchors(target) - # print("self.shape: ", sel[...,0].shape) ohe = tf.keras.utils.to_categorical(sel[..., 9], num_classes=self.nb_classes, dtype='float64') # print("self.shape: ", sel[...,0].shape) - # print("ohe.shape: ", ohe.shape) - # print("sel[8].shape: ",sel[..., 8].shape) + return sel[..., 0], sel[..., 1:4], sel[..., 4:7], sel[..., 7], sel[..., 8], ohe -class CustomDataGenerator(DataProcessor, Sequence, PCKittiAugmentedDataset): +class CustomDataGenerator(DataProcessor, Sequence, MTRDatasetBase): """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ - def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str ='train', + def __init__(self, batch_size: int, root_dir:str, point_cloud_statistics_path: str, + npoints:int =16384, split: str ='train', classes:List[str] =['Car', 'Pedestrian', 'Person_sitting'], random_select:bool =True, gt_database_dir=None, aug_hard_ratio:float=0.5, **kwargs): super(CustomDataGenerator, self).__init__( - batch_size=batch_size, root_dir=root_dir, - npoints=npoints, split=split, classes=classes, - random_select=random_select, gt_database_dir=gt_database_dir, - aug_hard_ratio=aug_hard_ratio, **kwargs + root_dir = root_dir, + split = split, + point_cloud_statistics_path = point_cloud_statistics_path, + **kwargs + # batch_size=batch_size, root_dir=root_dir, + # npoints=npoints, split=split, classes=classes, + # random_select=random_select, gt_database_dir=gt_database_dir, + # aug_hard_ratio=aug_hard_ratio, **kwargs ) - # self.data_reader = data_reader self.batch_size = batch_size - self.sample_id_list=self.get_sample_id_list() - # self.split = split - # print("CustomDataGenerator: " ,self.split) def get_sample(self, index): @@ -161,11 +161,10 @@ def get_sample(self, index): def __len__(self): - return len(self.sample_id_list) // self.batch_size + return len(self.sample_list) // self.batch_size def __getitem__(self, batch_id: int): file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) - # print("inside getitem") pillars = [] voxels = [] occupancy = [] @@ -178,22 +177,11 @@ def __getitem__(self, batch_id: int): for i in file_ids: - sample = self.get_sample(i) - # For each file, dividing the space into a x-y grid to create pillars - pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) - pts_input = np.concatenate((pts_lidar, sample['pts_features']), axis=1) # (N, C) + point_cloud = self.get_lidar_without_background(i) - gt_boxes3d_xyz = sample['calib'].rect_to_lidar(sample['gt_boxes3d'][:,:3]) + pts_features = point_cloud[:, 3:] - gt_boxes3d = np.concatenate(( - gt_boxes3d_xyz[:,0,np.newaxis], # 0 x - gt_boxes3d_xyz[:,1,np.newaxis], # 1 y - gt_boxes3d_xyz[:,2,np.newaxis] + sample['gt_boxes3d'][:,3,np.newaxis] / 2, # 2 z - sample['gt_boxes3d'][:,5,np.newaxis], # 3 l # same as the original label - sample['gt_boxes3d'][:,4,np.newaxis], # 4 w # same as the original label - sample['gt_boxes3d'][:,3,np.newaxis], # 5 h # same as the original label - -sample['gt_boxes3d'][:,6,np.newaxis], # 6 ry - ), axis=1) + pts_input = np.concatenate([point_cloud[:,:3], pts_features[:,1,np.newaxis]], axis=1) # Voxels are the pillar ids pillars_, voxels_ = self.make_point_pillars(pts_input) @@ -201,10 +189,34 @@ def __getitem__(self, batch_id: int): pillars.append(pillars_) voxels.append(voxels_) + + + obj_list = self.get_label(i) # are labels + + gt_boxes3d = np.zeros((obj_list.__len__(), 7), dtype=np.float32) + 
gt_bbox_params_list = [] + for k, obj in enumerate(obj_list): + gt_boxes3d[k, 0:3], gt_boxes3d[k, 3], gt_boxes3d[k, 4], gt_boxes3d[k, 5], gt_boxes3d[k, 6] \ + = obj.pos, obj.h, obj.w, obj.l, limit_period(obj.ry, offset=0.5, period=2*np.pi) # mtr format + # = obj.pos, obj.h, obj.w, obj.l, obj.ry # kitti + + # print(bboxes3d_[:,:3].shape) + invalid_region_mask = self._get_invalid_region_mask(gt_boxes3d[:,:3]) + gt_boxes3d = gt_boxes3d[~invalid_region_mask,:] + + gt_boxes3d = np.concatenate(( + gt_boxes3d[:,0,np.newaxis], # 0 x + gt_boxes3d[:,1,np.newaxis], # 1 y + gt_boxes3d[:,2,np.newaxis], # 2 z + gt_boxes3d[:,5,np.newaxis], # 3 l # same as the original label + gt_boxes3d[:,4,np.newaxis], # 4 w # same as the original label + gt_boxes3d[:,3,np.newaxis], # 5 h # same as the original label + gt_boxes3d[:,6,np.newaxis], # 6 ry + ), axis=1) - if self.split=='train' or self.split =='val': + if self.split=='train' or self.split =='test': occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( - gt_boxes3d, sample['gt_cls_type_list']) + gt_boxes3d, ['pedestrian' for i in range(len(gt_boxes3d))]) occupancy.append(occupancy_) position.append(position_) @@ -216,7 +228,7 @@ def __getitem__(self, batch_id: int): pillars = np.concatenate(pillars, axis=0) voxels = np.concatenate(voxels, axis=0) - if self.split=='train' or self.split =='val': + if self.split=='train' or self.split =='test': occupancy = np.array(occupancy) position = np.array(position) size = np.array(size) @@ -229,43 +241,39 @@ def __getitem__(self, batch_id: int): return [pillars, voxels] def on_epoch_end(self): - # print("inside epoch") - if self.split=='train' or self.split =='val': - # pass - self.sample_id_list=shuffle(self.sample_id_list) + if self.split=='train' or self.split =='test': + self.sample_list=shuffle(self.sample_list) + + class AnalyseCustomDataGenerator(CustomDataGenerator): """ Multiprocessing-safe data generator for training, validation or testing, without fancy augmentation """ - def __init__(self, batch_size: int, root_dir:str, npoints:int =16384, split: str ='train', + def __init__(self, batch_size: int, root_dir:str, point_cloud_statistics_path: str, + npoints:int =16384, split: str ='train', classes:List[str] =['Car', 'Pedestrian', 'Person_sitting'], random_select:bool =True, gt_database_dir=None, aug_hard_ratio:float=0.5, **kwargs): super(AnalyseCustomDataGenerator, self).__init__( batch_size=batch_size, root_dir=root_dir, + point_cloud_statistics_path = point_cloud_statistics_path, npoints=npoints, split=split, classes=classes, random_select=random_select, gt_database_dir=gt_database_dir, aug_hard_ratio=aug_hard_ratio, **kwargs ) - # self.data_reader = data_reader self.batch_size = batch_size - self.sample_id_list=self.get_sample_id_list() - # self.split = split - # self.split = split - # print("AnalyseCustomDataGenerator: " ,self.split) - def _get_sample(self, index): + def get_sample(self, index): return super().get_sample(index) def __len__(self): - return len(self.sample_id_list) // self.batch_size + return len(self.sample_list) // self.batch_size def __getitem__(self, batch_id: int): file_ids = range(batch_id * self.batch_size, self.batch_size * (batch_id + 1)) - # print("inside getitem") pillars = [] voxels = [] occupancy = [] @@ -274,76 +282,67 @@ def __getitem__(self, batch_id: int): angle = [] heading = [] classification = [] - pts_input_ = [] gt_boxes3d_ = [] - sample_ = [] - - # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" - # # 
Initialize and setup output directory. - # Converter = PointvizConverter(save_viz_path) - for i in file_ids: - # print(i) - # print(type(i)) - sample = self._get_sample(i) - # For each file, dividing the space into a x-y grid to create pillars - pts_lidar = sample['calib'].rect_to_lidar(sample['pts_rect']) - # print(pts_lidar.shape) - - pts_input = np.concatenate((pts_lidar, sample['pts_features']), axis=1) # (N, C) - gt_boxes3d_xyz = sample['calib'].rect_to_lidar(sample['gt_boxes3d'][:,:3]) - # print(gt_boxes3d_xyz.shape) - - gt_boxes3d = np.concatenate(( - gt_boxes3d_xyz[:,0,np.newaxis], # 0 x - gt_boxes3d_xyz[:,1,np.newaxis], # 1 y - gt_boxes3d_xyz[:,2,np.newaxis] + sample['gt_boxes3d'][:,3,np.newaxis] / 2, # 2 z - sample['gt_boxes3d'][:,5,np.newaxis], # 3 l # same as the original label - sample['gt_boxes3d'][:,4,np.newaxis], # 4 w # same as the original label - sample['gt_boxes3d'][:,3,np.newaxis], # 5 h # same as the original label - -sample['gt_boxes3d'][:,6,np.newaxis], # 6 ry - ), axis=1) + for i in file_ids: + point_cloud = self.get_lidar_without_background(i) - # print(type(gt_boxes3d)) - # gt_boxes3d = self.limit_yaw(gt_boxes3d) + pts_features = point_cloud[:, 3:] - # bbox_params = self.convert_labels_into_point_viz_format(gt_boxes3d) - # print(bbox_params.shape) - # Converter.compile("custom_sample_{}".format(i), coors=pts_input[:,:3], intensity=pts_input[:,3], - # bbox_params=bbox_params) - - - # exit() + pts_input = np.concatenate([point_cloud[:,:3], pts_features[:,1,np.newaxis]], axis=1) - # print(pts_input.shape) # Voxels are the pillar ids pillars_, voxels_ = self.make_point_pillars(pts_input) - # print(pillars_.shape, voxels_.shape) - # for i in range(10): - # print(pillars_[0,0,i,:]) - # print(np.sum(pillars_ > 0)) - # exit() - pillars.append(pillars_) voxels.append(voxels_) - # print(sample['gt_cls_type_list']) - # print("split: ", self.split) - if self.split=='train' or self.split =='val': - # print(len(gt_boxes3d), ", ", len(sample['gt_cls_type_list'])) - if (len(gt_boxes3d) == 0): - print("file id: ", i, " has zero gt label") - occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( - gt_boxes3d, sample['gt_cls_type_list']) - # print(len(a)) - # if + + + + if self.split=='train' or self.split =='test': + obj_list = self.get_label(i) # are labels + + gt_boxes3d = np.zeros((obj_list.__len__(), 7), dtype=np.float32) + # gt_bbox_params_list = [] + for k, obj in enumerate(obj_list): + gt_boxes3d[k, 0:3], gt_boxes3d[k, 3], gt_boxes3d[k, 4], gt_boxes3d[k, 5], gt_boxes3d[k, 6] \ + = obj.pos, obj.h, obj.w, obj.l, limit_period(obj.ry, offset=0.5, period=2*np.pi) # mtr format + # = obj.pos, obj.h, obj.w, obj.l, obj.ry # kitti + + # print(bboxes3d_[:,:3].shape) + invalid_region_mask = self._get_invalid_region_mask(gt_boxes3d[:,:3]) + gt_boxes3d = gt_boxes3d[~invalid_region_mask,:] + + # for k in range(len(gt_boxes3d)): + # gt_bbox_params = [gt_boxes3d[k, 5], gt_boxes3d[k, 3], gt_boxes3d[k, 4], + # gt_boxes3d[k, 1], gt_boxes3d[k, 2], gt_boxes3d[k, 0], + # gt_boxes3d[k, 6]] - # print(occupancy_.shape, position_.shape, size_.shape, angle_.shape, heading_.shape, classification_.shape) + # gt_bbox_params_list.append(gt_bbox_params) + + + + # if gt_boxes3d.__len__() == 0: + # print('No gt object') + # continue + + gt_boxes3d = np.concatenate(( + gt_boxes3d[:,0,np.newaxis], # 0 x + gt_boxes3d[:,1,np.newaxis], # 1 y + gt_boxes3d[:,2,np.newaxis], # 2 z + gt_boxes3d[:,5,np.newaxis], # 3 l # same as the original label + gt_boxes3d[:,4,np.newaxis], # 4 w # 
same as the original label + gt_boxes3d[:,3,np.newaxis], # 5 h # same as the original label + gt_boxes3d[:,6,np.newaxis], # 6 ry + ), axis=1) + + occupancy_, position_, size_, angle_, heading_, classification_ = self.make_ground_truth( + gt_boxes3d, ['pedestrian' for i in range(len(gt_boxes3d))]) occupancy.append(occupancy_) position.append(position_) @@ -352,29 +351,29 @@ def __getitem__(self, batch_id: int): heading.append(heading_) classification.append(classification_) - sample_.append(sample) gt_boxes3d_.append(gt_boxes3d) pts_input_.append(pts_input) + elif self.split=='real': + pts_input_.append(pts_input) - # exit() pillars = np.concatenate(pillars, axis=0) voxels = np.concatenate(voxels, axis=0) - if self.split=='train' or self.split =='val': + if self.split=='train' or self.split =='test': occupancy = np.array(occupancy) position = np.array(position) size = np.array(size) angle = np.array(angle) heading = np.array(heading) classification = np.array(classification) - # return [pillars, voxels], [occupancy, position, size, angle, heading, classification], [pts_input_, gt_boxes3d_, sample_] - return [pillars, voxels], [occupancy, position, size, angle, heading], [pts_input_, gt_boxes3d_, sample_] # new network + # return [pillars, voxels], [occupancy, position, size, angle, heading, classification] # network + return [pillars, voxels], [occupancy, position, size, angle, heading], [pts_input_, gt_boxes3d_] # network_v2 + elif self.split=='real': + return [pillars, voxels], [pts_input_] else: return [pillars, voxels] def on_epoch_end(self): - # print("inside epoch") - if self.split=='train' or self.split =='val': - self.sample_id_list=shuffle(self.sample_id_list) - \ No newline at end of file + if self.split=='train' or self.split =='test': + self.sample_list=shuffle(self.sample_list) \ No newline at end of file diff --git a/network_v2.py b/network_v2.py index 4e1deed..1fba5e4 100644 --- a/network_v2.py +++ b/network_v2.py @@ -112,8 +112,8 @@ def correct_batch_indices(tensor, batch_size): heading = tf.keras.layers.Conv2D(nb_anchors, (1, 1), name="heading/conv2d", activation="sigmoid")(concat) - # clf = tf.keras.layers.Conv2D(nb_anchors * nb_classes, (1, 1), name="clf/conv2d")(concat) - # clf = tf.keras.layers.Reshape(tuple(i // 2 for i in image_size) + (nb_anchors, nb_classes), name="clf/reshape")(clf) + clf = tf.keras.layers.Conv2D(nb_anchors * nb_classes, (1, 1), name="clf/conv2d")(concat) + clf = tf.keras.layers.Reshape(tuple(i // 2 for i in image_size) + (nb_anchors, nb_classes), name="clf/reshape")(clf) pillar_net = tf.keras.models.Model([input_pillars, input_indices], [occ, loc, size, angle, heading]) # print(pillar_net.summary()) diff --git a/network_v2_2.py b/network_v2_2.py index f30b6aa..7298525 100644 --- a/network_v2_2.py +++ b/network_v2_2.py @@ -11,10 +11,14 @@ def build_point_pillar_graph(params: Parameters): max_points = int(params.max_points_per_pillar) nb_features = int(params.nb_features) nb_channels = int(params.nb_channels) - batch_size = int(params.batch_size) + batch_size = int(params.batch_size) image_size = tuple([params.Xn, params.Yn]) nb_classes = int(params.nb_classes) nb_anchors = len(params.anchor_dims) + num_gpus = int(params.num_gpus) + # batch_size = batch_size // num_gpus + + # print(batch_size) if tf.keras.backend.image_data_format() == "channels_first": raise NotImplementedError @@ -24,10 +28,12 @@ def build_point_pillar_graph(params: Parameters): input_pillars = tf.keras.layers.Input(input_shape, batch_size=batch_size, name="pillars/input") 
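The correct_batch_indices helper defined a few lines below adds the sample index to the first coordinate of every pillar index, so that the scatter step which builds the pseudo-image writes each sample into its own slice of the (batch, H, W, C) canvas; the division by num_gpus is this patch's per-replica batch size. A minimal, self-contained illustration with made-up sizes (not the network's real dimensions):

import numpy as np
import tensorflow as tf

batch, max_pillars, channels, H, W = 2, 4, 3, 8, 8
features = tf.random.normal((batch, max_pillars, channels))
indices = np.zeros((batch, max_pillars, 3), dtype=np.int32)
indices[..., 1] = np.random.randint(0, H, (batch, max_pillars))   # row in the canvas
indices[..., 2] = np.random.randint(0, W, (batch, max_pillars))   # column in the canvas
for b in range(batch):
    indices[b, :, 0] = b            # what correct_batch_indices adds via the offset tensor
canvas = tf.scatter_nd(tf.constant(indices), features, (batch, H, W, channels))
print(canvas.shape)                 # (2, 8, 8, 3)
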
input_indices = tf.keras.layers.Input((max_pillars, 3), batch_size=batch_size, name="pillars/indices", dtype=tf.int32) + # print(batch_size, input_indices.shape, input_pillars.shape) def correct_batch_indices(tensor, batch_size): - array = np.zeros((batch_size, max_pillars, 3), dtype=np.float32) - for i in range(batch_size): + array = np.zeros((batch_size//num_gpus, max_pillars, 3), dtype=np.float32) + # print(batch_size, array.shape, input_pillars.shape) + for i in range(batch_size//num_gpus): array[i, :, 0] = i return tensor + tf.constant(array, dtype=tf.int32) diff --git a/point_pillars_custom_prediction.py b/point_pillars_custom_prediction.py deleted file mode 100644 index 43e1003..0000000 --- a/point_pillars_custom_prediction.py +++ /dev/null @@ -1,157 +0,0 @@ -import os -from glob import glob -import numpy as np -import tensorflow as tf -# from processors import SimpleDataGenerator -# from custom_processors import CustomDataGenerator, AnalyseCustomDataGenerator -from point_pillars_custom_processors_v2 import CustomDataGenerator, AnalyseCustomDataGenerator -from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator -from inference_utils_v2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array -from readers import KittiDataReader -from config_v2 import Parameters -from network import build_point_pillar_graph - - -from point_viz.converter import PointvizConverter - -DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" -MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" - -os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = "0" - - -def limit_period(val, offset=0.5, period=np.pi): - return val - np.floor(val / period + offset) * period - -if __name__ == "__main__": - - params = Parameters() - pillar_net = build_point_pillar_graph(params) - pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) - pillar_net.summary() - - exit() - # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_prediction_multiprocessing" - save_viz_path = os.path.join("/home/tan/tjtanaa/PointPillars/visualization", MODEL_ROOT.split('/')[-1]) - # Initialize and setup output directory. 
- Converter = PointvizConverter(save_viz_path) - - - - - - - gt_database_dir = os.path.join(DATA_ROOT, "gt_database") - - # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, - # npoints=20000, split='train', classes=list(params.classes_map.keys()), - # random_select=True, gt_database_dir=None, aug_hard_ratio=0.7) - - validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, - npoints=20000, split='val',random_select=False, classes=list(params.classes_map.keys())) - # validation_gen = AnalyseCustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, - # npoints=20000, split='val',random_select=False, classes=list(params.classes_map.keys())) - - for batch_idx in range(0,20): - [pillars, voxels], [occupancy_, position_, size_, angle_, heading_, classification_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] - - occupancy, position, size, angle, heading, classification = pillar_net.predict([pillars, voxels]) - - # angle = limit_period(angle, offset=0.5, period=2*np.pi) - - - # occupancy[:,:,:,:2] = 0 - - # print(occupancy.shape) - # exit() - - set_boxes, confidences = [], [] - loop_range = occupancy_.shape[0] if len(occupancy_.shape) == 4 else 1 - for i in range(loop_range): - set_box, predicted_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy[i], position[i], size[i], angle[i], - heading[i], - classification[i], params.anchor_dims, occ_threshold=0.5) - - - _, decoded_gt_boxes3d = generate_bboxes_from_pred_and_np_array(occupancy_[i], position_[i], size_[i], angle_[i], - heading_[i], - classification_[i], params.anchor_dims, occ_threshold=0.4) - - # gt_boxes3d_ = gt_boxes3d[i] - gt_boxes3d_ = decoded_gt_boxes3d - - # print(gt_boxes3d_.shape) - gt_bbox_params = np.stack([gt_boxes3d_[:,3], gt_boxes3d_[:,5], gt_boxes3d_[:,4], - gt_boxes3d_[:,1], gt_boxes3d_[:,2] , - gt_boxes3d_[:,0], - gt_boxes3d_[:,6]], axis=1) - - - gt_bbox_params_list = gt_bbox_params.tolist() - # gt_bbox_params_list = [] - for k in range(len(gt_bbox_params_list)): - msg = "%.5f, %s, %.5f"%(decoded_gt_boxes3d[k,9], params.map_classes[int(decoded_gt_boxes3d[k,8])], decoded_gt_boxes3d[k,6]) - # msg = "%.5f, %.5f"%(gt_bbox_params_list[k][3],gt_bbox_params_list[k][5]) - gt_bbox_params_list[k].append("Green") - # gt_bbox_params_list[k].append("1.0") - gt_bbox_params_list[k].append(msg) - - if len(set_box) > 0: - predicted_boxes3d_ = predicted_boxes3d - # bbox_params = validation_gen.convert_predictions_into_point_viz_format(predicted_boxes3d[:,[1, 2, 0, 5, 3, 4, 6 ]]) - - print("batch_idx: ", batch_idx * params.batch_size + i, " has ", predicted_boxes3d_.shape, "predictions") - # print(predicted_boxes3d_) - # print(size[i]) - - bbox_params = np.stack([predicted_boxes3d_[:,3], predicted_boxes3d_[:,5], predicted_boxes3d_[:,4], - predicted_boxes3d_[:,1], predicted_boxes3d_[:,2] , - predicted_boxes3d_[:,0], - predicted_boxes3d_[:,6]], axis=1) - - - bbox_params_list = bbox_params.tolist() - # bbox_labels_conf = [str(predicted_boxes3d[k,9]) for k in range(predicted_boxes3d.shape[0])] - for k in range(predicted_boxes3d.shape[0]): - msg = "%.5f, %s, %.5f"%(predicted_boxes3d[k,9],params.map_classes[int(predicted_boxes3d[k,8])], predicted_boxes3d[k,6]) - bbox_params_list[k].append("Magenta") - bbox_params_list[k].append(msg) - # bbox_params_list[k].append(str(predicted_boxes3d[k,9]) + "=" + params.map_classes[int(predicted_boxes3d[k,8])]) - gt_bbox_params_list.append(bbox_params_list[k]) - - coor = pts_input[i][:,[1,2,0]] - # 
coor[:,1] *= -1 - Converter.compile("val_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], - bbox_params=gt_bbox_params_list) - - # set_boxes.append(set_box) - # # set_boxes.append(generate_bboxes_from_pred(occupancy, position, size, angle, heading, - # # classification, params.anchor_dims, occ_threshold=0.1)) - # # confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) - - # sum_bboxes = 0 - # for h in range(len(set_boxes)): - # sum_bboxes += len(set_boxes[h]) - - # print('Batch ', str(batch_idx) ,': Box predictions with occupancy > occ_thr: ', sum_bboxes) - # print('Scene 1: Box predictions with occupancy > occ_thr: ', len(set_boxes[0])) - # exit() - # print(set_boxes[-1]) - - # # NMS - # nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5) - - # print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) - - # # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes - # gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=False) - # gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) - # for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): - # print("---------- New Scenario ---------- ") - # focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) - # print("---------- ------------ ---------- ") - # for gt in gt_label: - # print(gt) - # for pred in seq_boxes: - # print(pred) diff --git a/point_pillars_custom_prediction_v2_2.py b/point_pillars_custom_prediction_v2_2.py index ac942d3..090820b 100644 --- a/point_pillars_custom_prediction_v2_2.py +++ b/point_pillars_custom_prediction_v2_2.py @@ -3,8 +3,8 @@ import numpy as np import tensorflow as tf from point_pillars_custom_processors_v2_2 import CustomDataGenerator, AnalyseCustomDataGenerator -from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator -from inference_utils_v2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array +from inference_utils_v2_2 import generate_bboxes_from_pred +from inference_utils_v2_2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array from readers import KittiDataReader from config_v2_2 import Parameters from network_v2_2 import build_point_pillar_graph @@ -52,9 +52,12 @@ def limit_period(val, offset=0.5, period=np.pi): inference_duration = [] - for batch_idx in range(0,70): + for batch_idx in range(0,10): [pillars, voxels], [occupancy_, position_, size_, angle_, heading_], [pts_input, gt_boxes3d, sample] = validation_gen[batch_idx] + # 4 * 12000 * 100 * 9, 502 * 502 * 2 + + # 4 * 20000 * 4 start=datetime.now() @@ -132,34 +135,4 @@ def limit_period(val, offset=0.5, period=np.pi): # coor[:,1] *= -1 Converter.compile("val_custom_sample_{}".format(batch_idx * params.batch_size+i), coors=coor, intensity=pts_input[i][:,3], bbox_params=gt_bbox_params_list) - print("Average runtime speed: ", np.mean(inference_duration[20:])) - # set_boxes.append(set_box) - # # set_boxes.append(generate_bboxes_from_pred(occupancy, position, size, angle, heading, - # # classification, params.anchor_dims, occ_threshold=0.1)) - # # confidences.append([float(boxes.conf) for boxes in set_boxes[-1]]) - - # sum_bboxes = 0 - # for h in range(len(set_boxes)): - # sum_bboxes += len(set_boxes[h]) - - # print('Batch ', str(batch_idx) ,': Box predictions with occupancy > occ_thr: ', sum_bboxes) - # print('Scene 1: Box predictions with 
occupancy > occ_thr: ', len(set_boxes[0])) - # exit() - # print(set_boxes[-1]) - - # # NMS - # nms_boxes = rotational_nms(set_boxes, confidences, occ_threshold=0.7, nms_iou_thr=0.5) - - # print('Scene 1: Boxes after NMS with iou_thr: ', len(nms_boxes[0])) - - # # Do all the further operations on predicted_boxes array, which contains the predicted bounding boxes - # gt_gen = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=False) - # gt_gen0 = GroundTruthGenerator(data_reader, label_files, calibration_files, network_format=True) - # for seq_boxes, gt_label, gt0 in zip(nms_boxes, gt_gen, gt_gen0): - # print("---------- New Scenario ---------- ") - # focal_loss_checker(gt0[0], occupancy[0], n_occs=-1) - # print("---------- ------------ ---------- ") - # for gt in gt_label: - # print(gt) - # for pred in seq_boxes: - # print(pred) + print("Average runtime speed: ", np.mean(inference_duration[4:])) \ No newline at end of file diff --git a/point_pillars_custom_processors_v2_2.py b/point_pillars_custom_processors_v2_2.py index 2c0886c..656dc21 100644 --- a/point_pillars_custom_processors_v2_2.py +++ b/point_pillars_custom_processors_v2_2.py @@ -14,6 +14,7 @@ from det3d.pc_kitti_dataset import PCKittiAugmentedDataset from point_viz.converter import PointvizConverter +from datetime import datetime def select_best_anchors(arr): @@ -40,7 +41,7 @@ def make_point_pillars(self, points: np.ndarray): assert points.ndim == 2 assert points.shape[1] == 4 assert points.dtype == np.float32 - + # start=datetime.now() pillars, indices = createPillars(points, self.max_points_per_pillar, self.max_pillars, @@ -53,6 +54,7 @@ def make_point_pillars(self, points: np.ndarray): self.z_min, self.z_max, False) + # print("Create pillar takes : ", datetime.now()-start) return pillars, indices @@ -92,6 +94,8 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): assert np.all(target_yaw >= -np.pi) & np.all(target_yaw <= np.pi) assert len(target_positions) == len(target_dimension) == len(target_yaw) == len(target_class) + # start=datetime.now() + target, pos, neg = createPillarsTarget(target_positions, target_dimension, target_yaw, @@ -112,6 +116,9 @@ def make_ground_truth(self, gt_boxes_3d: Any, gt_cls_type_list: List[str]): self.z_min, self.z_max, False) + + # print("Create target takes : ", datetime.now()-start) + self.pos_cnt += pos self.neg_cnt += neg diff --git a/point_pillars_evaluation_v2_2.py b/point_pillars_evaluation_v2_2.py index 01fd5aa..88e87ae 100644 --- a/point_pillars_evaluation_v2_2.py +++ b/point_pillars_evaluation_v2_2.py @@ -5,9 +5,9 @@ import numpy as np import tensorflow as tf from point_pillars_custom_processors_v2_2 import CustomDataGenerator, AnalyseCustomDataGenerator -from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator -from inference_utils_v2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array, convert_boxes_to_list -from readers import KittiDataReader +from inference_utils_v2_2 import generate_bboxes_from_pred +from inference_utils_v2_2 import focal_loss_checker, rotational_nms, generate_bboxes_from_pred_and_np_array, convert_boxes_to_list +# from readers import KittiDataReader from config_v2_2 import Parameters from network_v2_2 import build_point_pillar_graph from datetime import datetime diff --git a/point_pillars_test.py b/point_pillars_test.py deleted file mode 100644 index 8cfa872..0000000 --- a/point_pillars_test.py +++ /dev/null @@ -1,70 +0,0 @@ -import unittest -import numpy as 
np -import tensorflow as tf - -from point_pillars import createPillars, createPillarsTarget, select - - -class PointPillarsTest(unittest.TestCase): - - def setUp(self): - np.random.seed(42) - xy = np.random.randint(-100, 100+1, size=(100000, 2)) - z = np.random.randint(-3, 1+1, size=(100000, 1)) - i = np.random.rand(100000) - self.arr = np.c_[xy, z, i] - assert self.arr.shape == (100000, 4) - - def test_pillar_creation(self): - pillars, indices = createPillars(self.arr, 100, 12000, 0.16, 0.16, 0, 80.64, -40.32, 40.32, -3, 1, True) - - assert pillars.shape == (1, 12000, 100, 7) - assert pillars.dtype == np.float32 - assert indices.shape == (1, 12000, 3) - assert indices.dtype == np.int32 - - session = tf.Session() - pillars = tf.constant(pillars, dtype=tf.float32) - indices = tf.constant(indices, dtype=tf.int32) - feature_map = tf.scatter_nd(indices, tf.reduce_mean(pillars, axis=2), (1, 504, 504, 7))[0] - arr, = session.run([feature_map]) - assert (arr.shape == (504, 504, 7)) - - @staticmethod - def test_pillar_target_creation(): - - dims = np.array([[3.7, 1.6, 1.4], [3.7, 1.6, 1.4], [0.8, 0.6, 1.7]], dtype=np.float32) - posn = np.array([[50, 10, 0], [20, 0, 0], [30, 5, 0]], dtype=np.float32) - yaws = np.array([0, 0, 90], dtype=np.float32) - - target = createPillarsTarget(posn, - dims, - yaws, - np.array([1, 1, 2], dtype=np.int32), - dims[[0, 2]], - np.array([0, 0], dtype=np.float32), - np.array([0, 90], dtype=np.float32), - 0.5, - 0.4, - 10, - 2, - 0.1, - 0.1, - 0, - 80, - -40, - 40, - -3, - 1, - True) - - assert target.shape == (3, 400, 400, 2, 10) - assert (target[..., 0] == 1).sum() == 83 - - selected = target[..., 0:1].argmax(axis=0) - target = select(target, selected) - assert (target.shape == (400, 400, 2, 10)) - - -if __name__ == "__main__": - unittest.main() diff --git a/point_pillars_training_custom_run_v2_2.py b/point_pillars_training_custom_run_v2_2.py index c7b4206..056002a 100644 --- a/point_pillars_training_custom_run_v2_2.py +++ b/point_pillars_training_custom_run_v2_2.py @@ -19,14 +19,16 @@ DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/" # TODO make main arg # MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_single_process" # MODEL_ROOT = "./logs_Car_Pedestrian_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val" -MODEL_ROOT = "./logs_Car_Custom_Dataset_No_Early_Stopping_wo_Aug_wo_val_new_network" +MODEL_ROOT = "./logs_Car_Custom_Dataset_No_Early_Stopping_Aug_val_new_network_multigpu" os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = "1" +os.environ["CUDA_VISIBLE_DEVICES"] = "3" if __name__ == "__main__": params = Parameters() + # gpus = tf.config.experimental.list_physical_devices('GPU') + pillar_net = build_point_pillar_graph(params) # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) pillar_net.summary() @@ -37,15 +39,34 @@ pillar_net.compile(optimizer, loss=loss.losses()) + + # loss = PointPillarNetworkLoss(params) + + # optimizer = tf.keras.optimizers.Adam(lr=params.learning_rate, decay=params.decay_rate) + + # if len(gpus)>1: + # strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()) + # with strategy.scope(): + # pillar_net = build_point_pillar_graph(params) + # # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + # pillar_net.compile(optimizer, loss=loss.losses()) + # else: + # pillar_net = build_point_pillar_graph(params) + # # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) + # pillar_net.compile(optimizer, loss=loss.losses()) + + # 
pillar_net.summary() + + gt_database_dir = os.path.join(DATA_ROOT, "gt_database") # gt_database_dir = None training_gen = CustomDataGenerator(batch_size=params.batch_size,root_dir = DATA_ROOT, - npoints=20000, split='train_val', classes=list(params.classes_map.keys()), + npoints=20000, split='train', classes=list(params.classes_map.keys()), random_select=True, gt_database_dir=gt_database_dir, aug_hard_ratio=0.7) validation_gen = CustomDataGenerator(batch_size=params.batch_size, root_dir=DATA_ROOT, - npoints=20000, split='train_val_test', classes=list(params.classes_map.keys())) + npoints=20000, split='val', classes=list(params.classes_map.keys())) # save_viz_path = "/home/tan/tjtanaa/PointPillars/visualization/custom_processor" @@ -78,7 +99,7 @@ steps_per_epoch=len(training_gen), callbacks=callbacks, use_multiprocessing=True, - # max_queue_size = 16, + max_queue_size = 16, epochs=int(params.total_training_epochs), workers=6) except KeyboardInterrupt: diff --git a/point_pillars_training_run.py b/point_pillars_training_run.py deleted file mode 100644 index 2dd1c27..0000000 --- a/point_pillars_training_run.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -import time -import numpy as np -import tensorflow as tf -from glob import glob - -from config import Parameters -from loss import PointPillarNetworkLoss -from network import build_point_pillar_graph -from processors import SimpleDataGenerator -# from custom_processors import CustomDataGenerator -from readers import KittiDataReader - -tf.get_logger().setLevel("ERROR") - -DATA_ROOT = "/media/data3/tjtanaa/kitti_dataset/KITTI/object/training" # TODO make main arg -MODEL_ROOT = "./logs_Car_Pedestrian_Original_2" - -os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"] = "2" - -if __name__ == "__main__": - - params = Parameters() - - pillar_net = build_point_pillar_graph(params) - # pillar_net.load_weights(os.path.join(MODEL_ROOT, "model.h5")) - - loss = PointPillarNetworkLoss(params) - - optimizer = tf.keras.optimizers.Adam(lr=params.learning_rate, decay=params.decay_rate) - - pillar_net.compile(optimizer, loss=loss.losses()) - - data_reader = KittiDataReader() - - lidar_files = sorted(glob(os.path.join(DATA_ROOT, "velodyne", "*.bin"))) - label_files = sorted(glob(os.path.join(DATA_ROOT, "label_2", "*.txt"))) - calibration_files = sorted(glob(os.path.join(DATA_ROOT, "calib", "*.txt"))) - assert len(lidar_files) == len(label_files) == len(calibration_files), "Input dirs require equal number of files." 
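Both this removed script and the retained training runs above schedule the learning rate with the same LearningRateScheduler lambda (it appears again in the callbacks a few lines below): the rate is multiplied by 0.8 whenever the epoch index is a non-zero multiple of epoch_to_decay. A small sketch of the resulting step decay, with epoch_to_decay and the starting rate assumed for illustration:

def scheduled_lr(epoch, lr, epoch_to_decay=15, factor=0.8):
    # Same condition as the lambda passed to tf.keras.callbacks.LearningRateScheduler.
    return lr * factor if (epoch % epoch_to_decay == 0 and epoch != 0) else lr

lr = 2e-4
for epoch in range(46):
    lr = scheduled_lr(epoch, lr)
print(lr)   # decays at epochs 15, 30 and 45, i.e. 2e-4 * 0.8**3
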
- validation_len = int(0.3*len(label_files)) - - training_gen = SimpleDataGenerator(data_reader, params.batch_size, lidar_files[:-validation_len], label_files[:-validation_len], calibration_files[:-validation_len]) - validation_gen = SimpleDataGenerator(data_reader, params.batch_size, lidar_files[-validation_len:], label_files[-validation_len:], calibration_files[-validation_len:]) - - log_dir = MODEL_ROOT - # epoch_to_decay = int( - # np.round(params.iters_to_decay / params.batch_size * int(np.ceil(float(len(label_files)) / params.batch_size)))) - - epoch_to_decay = int( - np.round(params.iters_to_decay / params.batch_size * int(len(training_gen)))) - - callbacks = [ - tf.keras.callbacks.TensorBoard(log_dir=log_dir), - tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(log_dir, "model.h5"), - monitor='val_loss', save_best_only=True), - tf.keras.callbacks.LearningRateScheduler( - lambda epoch, lr: lr * 0.8 if ((epoch % epoch_to_decay == 0) and (epoch != 0)) else lr, verbose=True), - tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_loss'), - ] - - try: - pillar_net.fit(training_gen, - validation_data = validation_gen, - steps_per_epoch=len(training_gen), - callbacks=callbacks, - # use_multiprocessing=True, - epochs=int(params.total_training_epochs)) - # workers=6) - except KeyboardInterrupt: - model_str = "interrupted_%s.h5" % time.strftime("%Y%m%d-%H%M%S") - pillar_net.save(os.path.join(log_dir, model_str)) - print("Interrupt. Saving output to %s" % os.path.join(os.getcwd(), log_dir[1:], model_str)) diff --git a/point_pillars_visualize_input.py b/point_pillars_visualize_input.py index 090c3f5..838a5c6 100644 --- a/point_pillars_visualize_input.py +++ b/point_pillars_visualize_input.py @@ -2,12 +2,9 @@ from glob import glob import numpy as np import tensorflow as tf -# from processors import SimpleDataGenerator -# from custom_processors import CustomDataGenerator, AnalyseCustomDataGenerator from point_pillars_custom_processors_v2 import CustomDataGenerator, AnalyseCustomDataGenerator from inference_utils_v2 import generate_bboxes_from_pred, GroundTruthGenerator, focal_loss_checker from inference_utils_v2 import rotational_nms, generate_bboxes_from_pred_and_np_array -from readers import KittiDataReader from config_v2 import Parameters from network import build_point_pillar_graph
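Across the prediction, evaluation and visualization scripts in this patch, a decoded box row has the column layout [x, y, z, length, width, height, yaw, heading, class, confidence] (see generate_bboxes_from_pred_and_np_array above), and the np.stack calls reorder it into [length, height, width, y, z, x, yaw] before handing it to PointvizConverter.compile as bbox_params. A tiny sketch of that reordering with a made-up box:

import numpy as np

# One decoded box: [x, y, z, length, width, height, yaw, heading, cls, conf]
boxes = np.array([[12.0, -3.0, -0.8, 3.9, 1.6, 1.5, 0.3, 1.0, 0.0, 0.87]])

# Same column selection as the np.stack calls in the scripts above:
bbox_params = np.stack([boxes[:, 3], boxes[:, 5], boxes[:, 4],
                        boxes[:, 1], boxes[:, 2], boxes[:, 0],
                        boxes[:, 6]], axis=1)
print(bbox_params)   # [[ 3.9  1.5  1.6 -3.  -0.8 12.   0.3]]
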