From fe089244595e814edcb71168e962aa16a3a421d6 Mon Sep 17 00:00:00 2001 From: Chris Date: Mon, 17 Jun 2024 10:21:36 +0100 Subject: [PATCH] Added python scripts to work with linux server --- Experimental/functions.ipynb | 2125 ++++++++ Linux/ltn_detection_linux.py | 2048 ++++++++ .../ltn_scoring_3_12_3_mass_process.ipynb | 4554 +++++++++++++++++ 3 files changed, 8727 insertions(+) create mode 100644 Experimental/functions.ipynb create mode 100644 Linux/ltn_detection_linux.py create mode 100644 remote_desktop/ltn_scoring_3_12_3_mass_process.ipynb diff --git a/Experimental/functions.ipynb b/Experimental/functions.ipynb new file mode 100644 index 0000000..bfa4f8e --- /dev/null +++ b/Experimental/functions.ipynb @@ -0,0 +1,2125 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "### set up python\n", + "## Library imports\n", + "import geopandas as gpd\n", + "import osmnx as ox\n", + "import networkx as nx\n", + "import momepy\n", + "import matplotlib.pyplot as plt\n", + "import folium\n", + "import pandas as pd\n", + "import overpy\n", + "from shapely.geometry import LineString\n", + "from shapely.geometry import Point\n", + "import requests\n", + "from shapely.geometry import MultiPolygon\n", + "from shapely.geometry import Polygon\n", + "import statistics\n", + "from shapely.ops import unary_union\n", + "import random\n", + "import overpy\n", + "import os \n", + "import math\n", + "from itertools import count\n", + "from collections import Counter\n", + "from sklearn.cluster import KMeans\n", + "#from osmnx._errors import InsufficientResponseError\n", + "from owslib.wms import WebMapService\n", + "from rasterio.mask import mask as rio_mask \n", + "from rasterio.features import shapes\n", + "from shapely.geometry import shape, mapping\n", + "from rasterio.io import MemoryFile\n", + "import numpy as np\n", + "from shapely.ops import unary_union\n", + "import warnings\n", + "from shapely.errors 
import ShapelyDeprecationWarning\n", + "\n", + "\n", + "\n", + "## Mute warnings\n", + "warnings.simplefilter(action='ignore', category=FutureWarning)\n", + "warnings.simplefilter(action='ignore', category=pd.errors.SettingWithCopyWarning)\n", + "warnings.simplefilter(action='ignore', category=ShapelyDeprecationWarning)\n", + "warnings.simplefilter(action='ignore', category=UserWarning)\n", + "\n", + "\n", + "\n", + "## Update settings\n", + "# update osmnx settings\n", + "useful_tags_ways = ox.settings.useful_tags_way + ['cycleway'] + ['bicycle'] + ['motor_vehicle'] + ['railway'] + ['tunnel'] + ['barrier'] + ['bus'] + ['access'] + ['oneway'] + ['oneway:bicycle'] + ['covered'] + ['waterway']\n", + "ox.config(use_cache=True, \n", + " log_console=True,\n", + " useful_tags_way=useful_tags_ways\n", + " )\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define the path to your text file\n", + "file_path = r'C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\data\\test.txt'\n", + "\n", + "# Initialize an empty list to store the lines\n", + "places = []\n", + "\n", + "# Open the file and read each line\n", + "with open(file_path, 'r') as file:\n", + " for line in file:\n", + " # Strip the newline character and any surrounding whitespace\n", + " place = line.strip()\n", + " # Append the line to the list\n", + " places.append(place)\n", + "\n", + "# Print the list of places\n", + "print(\"List of places read from test.txt:\")\n", + "for place in places:\n", + " print(place)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_OS_roads():\n", + " \"\"\"\n", + " Reads in OS Open Road data from a GeoPackage file.\n", + "\n", + " Returns:\n", + " os_open_roads (GeoDataFrame): A GeoDataFrame containing road data.\n", + " \"\"\"\n", + " os_open_roads = 
gpd.read_file(r\"C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\data\\oproad_gpkg_gb\\Data\\oproad_roads_only.gpkg\")\n", + " return os_open_roads\n", + "\n", + "os_open_roads = get_OS_roads()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def define_neighbourhoods(place, return_all):\n", + " # get boundary\n", + " def set_location_boundary(place):\n", + " \"\"\"\n", + " Sets up the location boundary by geocoding the given place and buffering it.\n", + "\n", + " Parameters:\n", + " place (str): The name or address of the place to geocode.\n", + "\n", + " Returns:\n", + " geopandas.GeoDataFrame: The buffered boundary of the location.\n", + " \"\"\"\n", + " # Set location and get boundary\n", + " boundary = ox.geocode_to_gdf(place)\n", + " boundary = boundary.to_crs('EPSG:27700')\n", + "\n", + " # Buffer boundary to ensure clips include riverlines which may act as borders between geographies\n", + " boundary_buffered = boundary.buffer(50)\n", + "\n", + " return boundary_buffered, boundary\n", + "\n", + " boundary_buffered, boundary = set_location_boundary(place)\n", + "\n", + "\n", + "\n", + "\n", + " \"\"\"\n", + " This code retrieves street nodes and edges for walking and driving from OpenStreetMap within our area boundary, and loads the OS Open Roads network dataset.\n", + "\n", + " Functions:\n", + " - get_street_networks: Retrieves street networks for all, walking, and driving modes within the specified boundary.\n", + " \"\"\"\n", + "\n", + " def get_OSM_street_networks(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves street networks for all, walking, and driving modes within the specified boundary.\n", + "\n", + " Parameters:\n", + " - boundary_buffered: A GeoDataFrame representing the boundary of the area of interest.\n", + "\n", + " Returns:\n", + " - all_edges: A GeoDataFrame containing the edges (streets) of the entire street 
network.\n", + " - all_nodes: A GeoDataFrame containing the nodes (intersections) of the entire street network.\n", + " - walk_edges: A GeoDataFrame containing the edges (streets) of the walking street network.\n", + " - walk_nodes: A GeoDataFrame containing the nodes (intersections) of the walking street network.\n", + " - drive_edges: A GeoDataFrame containing the edges (streets) of the driving street network.\n", + " - drive_nodes: A GeoDataFrame containing the nodes (intersections) of the driving street network.\n", + " - common_nodes_gdf: A GeoDataFrame containing the common nodes between the driving and walking street networks.\n", + " \"\"\"\n", + "\n", + " # Reset boundary_buffered crs for passing to OSM\n", + " boundary_buffered_4326 = boundary_buffered.to_crs('4326')\n", + "\n", + " # Get street networks\n", + " all_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='all', simplify=False)\n", + " walk_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='walk', simplify=True)\n", + " drive_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='drive', simplify=False)\n", + "\n", + " all_edges = ox.graph_to_gdfs(all_streets, nodes=False, edges=True)\n", + " all_nodes = ox.graph_to_gdfs(all_streets, nodes=True, edges=False)\n", + "\n", + " walk_edges = ox.graph_to_gdfs(walk_streets, nodes=False, edges=True)\n", + " walk_nodes = ox.graph_to_gdfs(walk_streets, nodes=True, edges=False)\n", + "\n", + " drive_edges = ox.graph_to_gdfs(drive_streets, nodes=False, edges=True)\n", + " drive_nodes = ox.graph_to_gdfs(drive_streets, nodes=True, edges=False)\n", + "\n", + " # Find the common nodes between networks\n", + " # This ensures that shortest paths between points should always be able to be calculated\n", + " common_nodes = drive_nodes.merge(walk_nodes, on='osmid', suffixes=('_drive', '_walk'))\n", + " common_nodes_gdf = gpd.GeoDataFrame(common_nodes, 
geometry='geometry_drive')\n", + "\n", + " return all_edges, all_nodes, walk_edges, walk_nodes, drive_edges, drive_nodes, common_nodes_gdf, all_streets, walk_streets, drive_streets\n", + "\n", + "\n", + " # get street networks\n", + " all_edges, all_nodes, walk_edges, walk_nodes, drive_edges, drive_nodes, common_nodes_gdf, all_streets, walk_streets, drive_streets = get_OSM_street_networks(boundary_buffered)\n", + " #os_open_roads = get_OS_roads() this is now got at the start of the code to avoid re-reading\n", + "\n", + "\n", + " def retrieve_osm_features(polygon, tags):\n", + " \"\"\"\n", + " Retrieves OpenStreetMap features based on the specified polygon and tags.\n", + "\n", + " Args:\n", + " polygon (Polygon): The polygon to retrieve features within.\n", + " tags (dict): The tags to filter the features.\n", + "\n", + " Returns:\n", + " GeoDataFrame: The retrieved OpenStreetMap features.\n", + " \"\"\"\n", + " try:\n", + " features = ox.features_from_polygon(polygon=polygon, tags=tags)\n", + " except Exception as e:\n", + " error_message = str(e)\n", + " if \"There are no data elements in the server response\" in error_message:\n", + " print(\"No data elements found for the specified location/tags.\")\n", + " features = gpd.GeoDataFrame() # Create an empty GeoDataFrame\n", + " else:\n", + " # Handle other exceptions here if needed\n", + " print(\"An error occurred:\", error_message)\n", + " features = None\n", + " return features\n", + "\n", + "\n", + " def get_railways(place):\n", + " \"\"\"\n", + " This retrievies and processes OpenStreetMap (OSM) railways data for a specified place.\n", + "\n", + " Args:\n", + " place (str): The name of the place to retrieve OSM features for.\n", + "\n", + " Returns:\n", + " railways (geopandas.GeoDataFrame): A GeoDataFrame containing the railways within the specified place.\n", + " \"\"\"\n", + "\n", + " # for unknown reasons, using rail = ox.graph_from_place(place, custom_filter='[\"railway\"]')\n", + " # doesn't ALWAYS 
retrive the full rail network, hence why multiple lines are used to achive the same result\n", + "\n", + " # Define railway types to retrieve\n", + " railway_types = [\"\", \"rail\", \"light_rail\", \"narrow_gauge\", \"subway\", \"tram\"]\n", + "\n", + " # Initialize an empty graph\n", + " combined_railways = nx.MultiDiGraph()\n", + "\n", + " for railway_type in railway_types:\n", + " try:\n", + " # Fetch the railway network for the specified type\n", + " network = ox.graph_from_place(place, simplify=False, custom_filter=f'[\"railway\"~\"{railway_type}\"]')\n", + "\n", + " # Ensure the fetched network is a MultiDiGraph\n", + " if not isinstance(network, nx.MultiDiGraph):\n", + " network = nx.MultiDiGraph(network)\n", + "\n", + " except Exception as e:\n", + " print(f\"No railway data found for '{railway_type}'.\")\n", + " network = nx.MultiDiGraph()\n", + "\n", + " # Compose the networks\n", + " combined_railways = nx.compose(combined_railways, network)\n", + "\n", + " # Convert to GeoDataFrame\n", + " railways = ox.graph_to_gdfs(combined_railways, nodes=False, edges=True)\n", + "\n", + " # Drop any other railway types that aren't needed\n", + " railways = railways.loc[(~railways[\"railway\"].isin([\"tunnel\", \"abandoned\", \"razed\", \"disused\", \"funicular\", \"monorail\", \"miniature\"]))]\n", + "\n", + " # Drop rows where any of the specified columns have values \"True\" or \"yes\"\n", + " columns_to_check = ['tunnel', 'abandoned', 'razed', 'disused', 'funicular', 'monorail', 'miniature']\n", + " railways = railways.loc[~railways[railways.columns.intersection(columns_to_check)].isin(['True', 'yes']).any(axis=1)]\n", + "\n", + " # Set railways CRS\n", + " railways = railways.to_crs('EPSG:27700')\n", + "\n", + " return railways\n", + "\n", + "\n", + "\n", + " \n", + " ## get urban footprints from GUF\n", + "\n", + " def get_guf(place):\n", + " \"\"\"\n", + " Retrieves a clipped GeoDataFrame of GUF urban areas within a specified place boundary.\n", + "\n", + " 
Parameters:\n", + " - place (str): The name or address of the place to retrieve urban areas for.\n", + "\n", + " Returns:\n", + " - gdf_clipped (GeoDataFrame): A GeoDataFrame containing the clipped urban areas within the specified place boundary.\n", + " \"\"\"\n", + "\n", + " # Step 1: Access the WMS Service\n", + " wms_url = 'https://geoservice.dlr.de/eoc/land/wms?GUF04_DLR_v1_Mosaic'\n", + " wms = WebMapService(wms_url, version='1.1.1')\n", + "\n", + " # Step 2: Identify the Layer with ID 102. This is the Global Urban Footprint layer GUF\n", + " for layer_name, layer in wms.contents.items():\n", + " if '102' in layer_name:\n", + " print(f\"Layer ID 102 found: {layer_name}\")\n", + "\n", + " # Assuming 'GUF04_DLR_v1_Mosaic' is the layer with ID 102\n", + " layer = 'GUF04_DLR_v1_Mosaic' # Replace with the actual layer name if different\n", + "\n", + " # Step 3: Get the polygon boundary using osmnx\n", + " boundary_gdf = ox.geocode_to_gdf(place)\n", + " boundary = boundary_gdf.to_crs('EPSG:27700')\n", + " # buffer boundary to ensure clips include riverlines which may act as borders between geographies\n", + " boundary_buffered = boundary.buffer(100)\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:4326')\n", + " boundary_polygon = boundary_gdf.geometry[0]\n", + " wms_boundary = boundary_buffered.geometry[0]\n", + "\n", + " # Convert the polygon to a bounding box\n", + " minx, miny, maxx, maxy = wms_boundary.bounds\n", + "\n", + " # Step 4: Request the data from WMS using the bounding box\n", + " width = 1024\n", + " height = 1024\n", + " response = wms.getmap(\n", + " layers=[layer],\n", + " srs='EPSG:4326',\n", + " bbox=(minx, miny, maxx, maxy),\n", + " size=(width, height),\n", + " format='image/geotiff'\n", + " )\n", + "\n", + " # Step 5: Load the Raster Data into Rasterio\n", + " with MemoryFile(response.read()) as memfile:\n", + " with memfile.open() as src:\n", + " image = src.read(1) # Read the first band\n", + " transform = src.transform\n", + " 
crs = src.crs\n", + "\n", + " # Clip the raster data to the polygon\n", + " out_image, out_transform = rio_mask(src, [mapping(wms_boundary)], crop=True) # Use renamed mask function\n", + " out_meta = src.meta.copy()\n", + " out_meta.update({\"driver\": \"GTiff\",\n", + " \"height\": out_image.shape[1],\n", + " \"width\": out_image.shape[2],\n", + " \"transform\": out_transform,\n", + " \"crs\": crs})\n", + "\n", + " # Step 6: Convert Raster to Vector\n", + " mask_arr = (out_image[0] != 0).astype(np.uint8) # Assuming non-zero values are urban areas\n", + "\n", + " shapes_gen = shapes(mask_arr, mask=mask_arr, transform=out_transform)\n", + "\n", + " polygons = []\n", + " for geom, value in shapes_gen:\n", + " polygons.append(shape(geom))\n", + "\n", + " # Create a GeoDataFrame from the polygons\n", + " gdf = gpd.GeoDataFrame({'geometry': polygons}, crs=crs)\n", + "\n", + " # Step 7: Create Buffers Around Urban Areas\n", + " buffer_distance = 100 # Buffer distance in meters (adjust as needed)\n", + " gdf_buffered = gdf.copy()\n", + " gdf_buffered['geometry'] = gdf['geometry'].buffer(buffer_distance)\n", + "\n", + " # Step 8: Clip the GeoDataFrame to the boundary of the place\n", + " gdf_clipped = gpd.clip(gdf, boundary_gdf)\n", + "\n", + " return gdf_clipped\n", + "\n", + "\n", + " guf = get_guf(place)\n", + "\n", + "\n", + "\n", + "\n", + " ## get residential areas\n", + "\n", + " def get_residential_areas(polygon):\n", + " polygon = polygon.to_crs('EPSG:4326')\n", + " # Retrieve features from OpenStreetMap\n", + " features = ox.features_from_polygon(polygon.iloc[0], tags={'landuse': 'residential'})\n", + " \n", + " # Convert features to a GeoDataFrame\n", + " gdf = gpd.GeoDataFrame.from_features(features)\n", + " gdf = gdf.set_crs('EPSG:4326')\n", + " \n", + " return gdf\n", + "\n", + "\n", + " residential_areas = get_residential_areas(boundary_buffered)\n", + " \n", + " \n", + "\n", + " ## join urban foot prints and residential areas\n", + " # this is to create a 
single polygon of where neighbourhoods can be found within\n", + "\n", + " def join_geodataframes(gdf1, gdf2):\n", + " # Ensure both GeoDataFrames have the exact same CRS\n", + " target_crs = 'EPSG:4326' # WGS 84\n", + " gdf1 = gdf1.to_crs(target_crs)\n", + " gdf2 = gdf2.to_crs(target_crs)\n", + " \n", + " # Concatenate GeoDataFrames\n", + " joined_gdf = pd.concat([gdf1, gdf2], ignore_index=True)\n", + " \n", + " return gpd.GeoDataFrame(joined_gdf, crs=target_crs)\n", + "\n", + "\n", + " guf_residential_gdf = join_geodataframes(guf, residential_areas)\n", + "\n", + "\n", + " ## create a small buffer to ensure all areas a captured correctly\n", + "\n", + " def buffer_geometries_in_meters(gdf, distance):\n", + " # Define the World Mercator projected CRS\n", + " projected_crs = 'EPSG:3395' # World Mercator\n", + "\n", + " # Project to the new CRS\n", + " gdf_projected = gdf.to_crs(projected_crs)\n", + " \n", + " # Buffer the geometries\n", + " gdf_projected['geometry'] = gdf_projected['geometry'].buffer(distance)\n", + " \n", + " # Reproject back to the original CRS\n", + " gdf_buffered = gdf_projected.to_crs(gdf.crs)\n", + " \n", + " return gdf_buffered\n", + "\n", + "\n", + " guf_residential_gdf = buffer_geometries_in_meters(guf_residential_gdf, 100) # Buffer by 100 meters\n", + "\n", + "\n", + " ## union into one gdf\n", + "\n", + " def unary_union_polygons(gdf):\n", + " # Combine all geometries into a single geometry\n", + " unified_geometry = unary_union(gdf['geometry'])\n", + " \n", + " # Create a new GeoDataFrame with a single row containing the unified geometry\n", + " combined_gdf = gpd.GeoDataFrame({'geometry': [unified_geometry]}, crs=gdf.crs)\n", + " \n", + " return combined_gdf\n", + "\n", + "\n", + " guf_residential_gdf = unary_union_polygons(guf_residential_gdf)\n", + "\n", + " # set to BNG\n", + " guf_residential_gdf = guf_residential_gdf.to_crs(\"27700\")\n", + "\n", + " # Function to remove holes from neighbourhoods\n", + " def 
remove_holes(polygon):\n", + " if polygon.geom_type == 'Polygon':\n", + " return Polygon(polygon.exterior)\n", + " else:\n", + " return polygon\n", + "\n", + " # remove holes from urban footprint\n", + " guf_residential_gdf['geometry'] = guf_residential_gdf['geometry'].apply(remove_holes)\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " def get_rivers(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves river features within a given boundary.\n", + "\n", + " Args:\n", + " boundary_buffered (GeoDataFrame): A GeoDataFrame representing the buffered boundary.\n", + "\n", + " Returns:\n", + " GeoDataFrame: A GeoDataFrame containing the river features within the boundary.\n", + " \"\"\"\n", + " # Ensure the boundary is in the correct CRS for the query\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:4326')\n", + "\n", + " # Check the content of boundary_buffered to ensure it's not empty and correctly transformed\n", + " if boundary_buffered.empty:\n", + " raise ValueError(\"The provided boundary is empty.\")\n", + "\n", + " # Define the tags for waterways\n", + " tags = {\"waterway\": [\"river\", \"rapids\"]}\n", + "\n", + " try:\n", + " # Fetch features from OSM using the boundary geometry\n", + " polygon = boundary_buffered.geometry.iloc[0]\n", + " rivers = ox.features_from_polygon(polygon=polygon, tags=tags)\n", + "\n", + " # Dropping rows where 'tunnel' is equal to 'culvert'\n", + " if 'tunnel' in rivers.columns:\n", + " rivers = rivers[rivers['tunnel'] != 'culvert']\n", + "\n", + " # Convert the CRS back to the desired one\n", + " rivers = rivers.to_crs('EPSG:27700')\n", + "\n", + " # Set the geometry column explicitly\n", + " rivers = rivers.set_geometry('geometry')\n", + "\n", + " return rivers\n", + "\n", + " except InsufficientResponseError:\n", + " print(\"No data elements found for the given boundary and tags.\")\n", + " empty_geometry = {'geometry': [LineString()]}\n", + " rivers = gpd.GeoDataFrame(empty_geometry, 
crs='EPSG:27700')\n", + " return rivers # Return an empty GeoDataFrame if no data found\n", + "\n", + "\n", + "\n", + " def get_landuse(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves the landuse features based on the specified boundary.\n", + "\n", + " Args:\n", + " boundary_buffered (GeoDataFrame): The buffered boundary polygon.\n", + "\n", + " Returns:\n", + " GeoDataFrame: The landuse features.\n", + " \"\"\"\n", + " # reset boundary crs to allow for features to be found\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:4326')\n", + " # Define tags\n", + " tags = {\"landuse\": [\"industrial\", \"railway\", \"brownfield\", \"commercial\", \"farmland\", \"meadow\"]}\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " landuse = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)\n", + " # set/reset crs\n", + " landuse = landuse.to_crs('27700')\n", + "\n", + " ## get unsuitable \"nature\" types\n", + " # Define tags\n", + " tags = {\"natural\": [\"wood\", \"water\", \"scrub\", \"coastline\", \"beach\"]}\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " nature = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)\n", + " # set/reset crs\n", + " nature = nature.to_crs('27700')\n", + "\n", + " ## get unsuitable \"lesiure\" types. 
This is mainly for golfcourses\n", + " # Define tags\n", + " tags = {\"leisure\": [\"golf_course\", \"track\", \"park\"]}\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " leisure = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)\n", + " # set/reset crs\n", + " leisure = leisure.to_crs('27700')\n", + " # Define the tags for aeroway\n", + " aeroway_tags = {\"aeroway\": [\"aerodrome\"]}\n", + " # Use the function to retrieve aeroway features\n", + " aeroway = retrieve_osm_features(polygon=boundary_buffered.iloc[0], tags=aeroway_tags)\n", + " # Check if any features were retrieved\n", + " if aeroway is not None:\n", + " if not aeroway.empty:\n", + " # set/reset crs\n", + " aeroway = aeroway.to_crs('27700')\n", + "\n", + " # concat\n", + " landuse = pd.concat([landuse, nature, leisure, aeroway])\n", + "\n", + " ## resest boundary crs\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:27700')\n", + "\n", + " return landuse\n", + "\n", + "\n", + " def get_bus_routes(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves bus routes from OSM/NAPTAN within a given boundary.\n", + "\n", + " Args:\n", + " boundary_buffered (GeoDataFrame): A GeoDataFrame representing the boundary.\n", + "\n", + " Returns:\n", + " bus_routes (GeoDataFrame): A GeoDataFrame containing the bus routes.\n", + "\n", + " Raises:\n", + " Exception: If there is an error fetching the data from the Overpass API.\n", + " \"\"\"\n", + " # reset boundary crs to allow for features to be found\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:4326')\n", + "\n", + " # Calculate the bounding box for XML query\n", + " bounding_box = boundary_buffered.bounds\n", + "\n", + " # Extract the minimum and maximum coordinates\n", + " minx = bounding_box['minx'].min()\n", + " miny = bounding_box['miny'].min()\n", + " maxx = bounding_box['maxx'].max()\n", + " maxy = bounding_box['maxy'].max()\n", + "\n", + " # Create a list of four elements 
representing the bounding box\n", + " bbox = [minx, miny, maxx, maxy]\n", + "\n", + " # reset boundary_buffer crs\n", + " boundary_buffered = boundary_buffered.to_crs('27700')\n", + "\n", + " # Define the Overpass API endpoint\n", + " overpass_url = \"https://overpass-api.de/api/interpreter\"\n", + "\n", + " # Define the XML query\n", + " xml_query = f\"\"\"\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \"\"\"\n", + "\n", + " # Initialize lists to store data\n", + " geometries = []\n", + " element_data = []\n", + "\n", + " # Make the Overpass API request\n", + " response = requests.post(overpass_url, data=xml_query)\n", + "\n", + " # Check if the request was successful\n", + " if response.status_code == 200:\n", + " data = response.json()\n", + "\n", + " # Access the data from the response\n", + " for element in data.get(\"elements\", []):\n", + " if element.get('type') == 'way' and 'geometry' in element:\n", + " # Extract geometry coordinates from 'geometry' field\n", + " coordinates = [(node['lon'], node['lat']) for node in element['geometry']]\n", + " # Create a LineString geometry\n", + " line = LineString(coordinates)\n", + " geometries.append(line)\n", + " element_data.append(element)\n", + "\n", + " # Create a GeoDataFrame\n", + " bus_routes = gpd.GeoDataFrame(element_data, geometry=geometries)\n", + "\n", + " # Set CRS\n", + " bus_routes = bus_routes.set_crs('4326')\n", + " bus_routes = bus_routes.to_crs('27700')\n", + "\n", + " return bus_routes\n", + "\n", + " else:\n", + " raise Exception(f\"Error fetching data: {response.status_code} - {response.text}\")\n", + "\n", + "\n", + "\n", + " def clip_boundaries(os_open_roads, rivers, railways, landuse, bus_routes, boundary_buffered):\n", + " \"\"\"\n", + " Clips the geospatial data to the boundary_buffered extent.\n", + "\n", + " Parameters:\n", + " - os_open_roads 
(GeoDataFrame): lines from OS Open roads.\n", + " - rivers (GeoDataFrame): lines of Rivers.\n", + " - railways (GeoDataFrame): lines of Railways.\n", + " - landuse (GeoDataFrame): Land use polygons.\n", + " - bus_routes (GeoDataFrame): lines of bus routes.\n", + " - boundary_buffered (GeoDataFrame): buffered boundary.\n", + "\n", + " Returns:\n", + " - clipped versions of input geodataframes, aside from the bufferd boundary.\n", + " \"\"\"\n", + " os_open_roads_clip = gpd.clip(os_open_roads, boundary_buffered)\n", + " rivers_clip = gpd.clip(rivers, boundary_buffered)\n", + " railways_clip = gpd.clip(railways, boundary_buffered)\n", + " landuse_clip = gpd.clip(landuse, boundary_buffered)\n", + " bus_routes_clip = gpd.clip(bus_routes, boundary_buffered)\n", + "\n", + " return os_open_roads_clip, rivers_clip, railways_clip, landuse_clip, bus_routes_clip\n", + "\n", + "\n", + " def process_bus_routes(bus_routes_clip, buffer_distance):\n", + " \"\"\"\n", + " Count the number of bus routes per road and remove roads with more than one bus route on them.\n", + " \n", + " Args:\n", + " bus_routes_clip (GeoDataFrame): The input GeoDataFrame containing bus routes.\n", + " buffer_distance (float): The buffer distance to convert roads to polygons, set in meters.\n", + " \n", + " Returns:\n", + " GeoDataFrame: The filtered GeoDataFrame containing roads with less than or equal to one bus route.\n", + " \"\"\"\n", + " # Create a new GeoDataFrame with the buffered geometries\n", + " bus_routes_buffered = bus_routes_clip.copy() # Copy the original GeoDataFrame\n", + " bus_routes_buffered['geometry'] = bus_routes_buffered['geometry'].buffer(buffer_distance)\n", + "\n", + " # count the number of overlapping bus routes\n", + " def count_overlapping_features(gdf):\n", + " \"\"\"\n", + " Count the number of overlapping features in a GeoDataFrame.\n", + " \n", + " Args:\n", + " gdf (GeoDataFrame): The input GeoDataFrame.\n", + " \n", + " Returns:\n", + " GeoDataFrame: The input 
GeoDataFrame with an additional column 'Bus_routes_count' indicating the count of overlapping features.\n", + " \"\"\"\n", + " # Create an empty column to store the count of overlapping features\n", + " gdf['Bus_routes_count'] = 0\n", + "\n", + " # Iterate through each row in the GeoDataFrame\n", + " for idx, row in gdf.iterrows():\n", + " # Get the geometry of the current row\n", + " geometry = row['geometry']\n", + " \n", + " # Use a spatial filter to find overlapping features\n", + " overlaps = gdf[gdf['geometry'].intersects(geometry)]\n", + " \n", + " # Update the Bus_routes_count column with the count of overlapping features\n", + " gdf.at[idx, 'Bus_routes_count'] = len(overlaps)\n", + " \n", + " return gdf\n", + "\n", + " # call function\n", + " bus_routes_buffered_with_count = count_overlapping_features(bus_routes_buffered)\n", + "\n", + " # drop any roads which have less than two bus routes on them\n", + " bus_routes_filtered = bus_routes_buffered_with_count[bus_routes_buffered_with_count['Bus_routes_count'] >= 2]\n", + " \n", + " return bus_routes_filtered\n", + "\n", + "\n", + "\n", + " def filter_OS_boundary_roads(os_open_roads_clip):\n", + " \"\"\"\n", + " Filter the `os_open_roads_clip` DataFrame to select boundary roads.\n", + "\n", + " This function filters the `os_open_roads_clip` DataFrame to select roads that are considered \"boundary\" roads. 
\n", + " The selection criteria include roads that have the following attributes:\n", + " - `primary_route` is True\n", + " - `trunk_road` is True\n", + " - `fictitious` is True\n", + " - `road_classification` is 'A Road' or 'B Road'\n", + " - `road_function` is 'Minor Road' or 'Motorway'\n", + "\n", + " The filtered DataFrame is returned.\n", + "\n", + " Note: The commented line `(os_open_roads_clip['road_function'] == 'Restricted Local Access Road')` is excluded from the selection.\n", + "\n", + " Parameters:\n", + " - os_open_roads_clip (DataFrame): A DataFrame containing road data.\n", + "\n", + " Returns:\n", + " - boundary_roads (DataFrame): A DataFrame containing the filtered boundary roads.\n", + "\n", + " Example usage:\n", + " # Assuming `os_open_roads_clip` is a DataFrame containing road data\n", + " boundary_roads = filter_boundary_roads(os_open_roads_clip)\n", + " \"\"\"\n", + " boundary_roads = os_open_roads_clip.loc[((os_open_roads_clip['primary_route'] == 'True') |\n", + " (os_open_roads_clip['trunk_road'] == 'True') |\n", + " (os_open_roads_clip['fictitious'] == 'True') |\n", + " (os_open_roads_clip['road_classification'] == 'A Road') | \n", + " (os_open_roads_clip['road_classification'] == 'B Road') | \n", + " (os_open_roads_clip['road_function'] == 'Minor Road') |\n", + " (os_open_roads_clip['road_function'] == 'Motorway') |\n", + " (os_open_roads_clip['road_function'] == 'Minor Road') \n", + " )]\n", + " return boundary_roads\n", + "\n", + "\n", + "\n", + " ## buffering and dissolving functions\n", + " \n", + " def buffer_and_dissolve(input_gdf):\n", + " \"\"\"\n", + " Buffer and dissolve a GeoDataFrame.\n", + " \n", + " Args:\n", + " input_gdf (GeoDataFrame): The input GeoDataFrame.\n", + " \n", + " Returns:\n", + " GeoDataFrame: The buffered and dissolved GeoDataFrame.\n", + " \"\"\"\n", + " # Buffer around boundaries\n", + " buffered_gdf = input_gdf.copy() # Create a copy to avoid modifying the original\n", + " buffered_gdf['geometry'] = 
buffered_gdf['geometry'].buffer(5) # set a 5 meter buffer\n", + "\n", + " # Dissolve the geometries\n", + " dissolved_geo = buffered_gdf.unary_union\n", + "\n", + " # Create a new GeoDataFrame with the dissolved geometry\n", + " dissolved_gdf = gpd.GeoDataFrame(geometry=[dissolved_geo])\n", + "\n", + " # Set the CRS (Coordinate Reference System)\n", + " dissolved_gdf.crs = input_gdf.crs\n", + "\n", + " return dissolved_gdf\n", + "\n", + "\n", + " def dissolve_gdf(input_gdf):\n", + " # dissolve geometries\n", + " dissolved_geo = input_gdf.unary_union\n", + " dissolved_gdf = gpd.GeoDataFrame(geometry=[dissolved_geo])\n", + " dissolved_gdf.crs = input_gdf.crs\n", + "\n", + " return dissolved_gdf\n", + "\n", + "\n", + " def erase_boundary_features(boundary, boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd, boundary_landuse_bd, boundary_bus_routes_bd, guf_residential_gdf):\n", + " \"\"\"\n", + " Erases boundary features from the given boundary geometry.\n", + "\n", + " Parameters:\n", + " - boundary: GeoDataFrame representing the boundary geometry\n", + " - boundary_rivers_bd: GeoDataFrame representing the rivers boundary features\n", + " - boundary_roads_bd: GeoDataFrame representing the roads boundary features\n", + " - boundary_rail_bd: GeoDataFrame representing the rail boundary features\n", + " - boundary_landuse_bd: GeoDataFrame representing the landuse boundary features\n", + " - boundary_bus_routes_bd: GeoDataFrame representing the bus routes boundary features\n", + "\n", + " Returns:\n", + " - erased_boundary_gdf: GeoDataFrame containing the result of the \"Erase\" operation\n", + " \"\"\"\n", + "\n", + " # ensure that neighbourhoods fall only within urban footprint areas\n", + " boundary = gpd.clip(boundary, guf_residential_gdf)\n", + "\n", + " # Join all boundary features\n", + " boundaries = pd.concat([boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd, boundary_landuse_bd, boundary_bus_routes_bd], ignore_index=True)\n", + " boundary_features = 
dissolve_gdf(boundaries)\n", + "\n", + " # Use the `difference` method to perform the \"Erase\" operation\n", + " erased_boundary = boundary.difference(boundary_features.unary_union)\n", + "\n", + " # Convert the GeoSeries to a single geometry using unary_union\n", + " erased_boundary = erased_boundary.unary_union\n", + "\n", + " # Create a new GeoDataFrame with the result of \"Erase\" operation\n", + " erased_boundary_gdf = gpd.GeoDataFrame(geometry=[erased_boundary], crs=boundary.crs)\n", + "\n", + " # Explode multipolygon to polygons\n", + " erased_boundary_gdf = erased_boundary_gdf.explode()\n", + "\n", + " return erased_boundary_gdf\n", + "\n", + "\n", + " def drop_large_or_small_areas(neighbourhoods):\n", + " \"\"\"\n", + " Drops rows from the 'neighbourhoods' DataFrame where the area is less than 10,000 square units or greater than 5,000,000 square units.\n", + "\n", + " Parameters:\n", + " - neighbourhoods (DataFrame): The input DataFrame containing neighbourhood data.\n", + "\n", + " Returns:\n", + " - neighbourhoods (DataFrame): The updated DataFrame with small areas dropped.\n", + " \"\"\"\n", + " # Calculate area\n", + " neighbourhoods[\"area\"] = neighbourhoods.geometry.area\n", + "\n", + " # Drop rows where area is less than 10,000 or greater than 5,000,000\n", + " neighbourhoods = neighbourhoods.loc[(neighbourhoods[\"area\"] >= 10000)]\n", + " neighbourhoods = neighbourhoods.loc[(neighbourhoods[\"area\"] <= 5000000)]\n", + "\n", + " return neighbourhoods\n", + "\n", + "\n", + " def filter_neighbourhoods_by_roads(neighbourhoods, os_open_roads_clip, polygon_column_name):\n", + " \"\"\"\n", + " Count the number of roads within each polygon in a GeoDataFrame and filter the neighbourhoods based on road count and road density.\n", + " \n", + " Args:\n", + " neighbourhoods (GeoDataFrame): GeoDataFrame containing neighbourhood polygons.\n", + " os_open_roads_clip (GeoDataFrame): GeoDataFrame containing road data.\n", + " polygon_column_name (str): Name of 
the column in neighbourhoods to use for grouping.\n", + "\n", + " Returns:\n", + " GeoDataFrame: Updated neighbourhoods GeoDataFrame with filtered rows based on road count and road density.\n", + " \"\"\"\n", + " \n", + " def count_roads_within_polygons(polygons_gdf, roads_gdf, polygon_column_name):\n", + " \"\"\"\n", + " Count the number of roads within each polygon in a GeoDataFrame.\n", + " \n", + " Args:\n", + " polygons_gdf (GeoDataFrame): GeoDataFrame containing polygons.\n", + " roads_gdf (GeoDataFrame): GeoDataFrame containing roads.\n", + " polygon_column_name (str): Name of the column in polygons_gdf to use for grouping.\n", + "\n", + " Returns:\n", + " GeoDataFrame: Original polygons GeoDataFrame with a \"road_count\" column added.\n", + " \"\"\"\n", + " \n", + " # spatial join\n", + " joined = gpd.sjoin(polygons_gdf, roads_gdf, how='left', op='intersects')\n", + " \n", + " # Group by the polygon column and count the number of roads in each\n", + " road_counts = joined.groupby(polygon_column_name).size().reset_index(name='road_count')\n", + " \n", + " # Merge the road counts back into the polygons GeoDataFrame\n", + " polygons_gdf = polygons_gdf.merge(road_counts, on=polygon_column_name, how='left')\n", + "\n", + " # Calculate road density (area divided by road_count). 
It is multiplied by 10000 for ease of understanding the numbers involved with this\n", + " polygons_gdf['road_density'] = (polygons_gdf['road_count'] / polygons_gdf['area'] ) * 10000\n", + " \n", + " return polygons_gdf\n", + " \n", + " neighbourhoods = count_roads_within_polygons(neighbourhoods, os_open_roads_clip, polygon_column_name)\n", + "\n", + " # Drop rows with road_density below 0.2 or less than 4 roads\n", + " neighbourhoods = neighbourhoods[(neighbourhoods['road_count'] > 2)]\n", + " neighbourhoods = neighbourhoods[(neighbourhoods['road_density'] > 0.2)]\n", + " \n", + " return neighbourhoods\n", + "\n", + "\n", + " def remove_holes(polygon):\n", + " \"\"\"\n", + " Removes holes from a polygon. Mostly for visual reasons.\n", + "\n", + " Parameters:\n", + " polygon (Polygon): The polygon to remove holes from.\n", + "\n", + " Returns:\n", + " Polygon: The polygon without holes.\n", + " \"\"\"\n", + " if polygon.geom_type == 'Polygon':\n", + " return Polygon(polygon.exterior)\n", + " else:\n", + " return polygon\n", + "\n", + " landuse = get_landuse(boundary_buffered)\n", + " rivers = get_rivers(boundary_buffered)\n", + " railways = get_railways(place)\n", + " landuse = get_landuse(boundary_buffered)\n", + " bus_routes = get_bus_routes(boundary_buffered)\n", + " os_open_roads_clip, rivers_clip, railways_clip, landuse_clip, bus_routes_clip = clip_boundaries(os_open_roads, rivers, railways, landuse, bus_routes, boundary_buffered)\n", + " bus_routes_clip = process_bus_routes(bus_routes_clip, 0.2)\n", + " boundary_roads = filter_OS_boundary_roads(os_open_roads_clip)\n", + "\n", + " ## buffer and dissolve \n", + " boundary_roads_bd = buffer_and_dissolve(boundary_roads)\n", + " boundary_rivers_bd = buffer_and_dissolve(rivers_clip)\n", + " boundary_rail_bd = buffer_and_dissolve(railways_clip)\n", + " boundary_landuse_bd = buffer_and_dissolve(landuse_clip)\n", + " boundary_bus_routes_bd = buffer_and_dissolve(bus_routes_clip)\n", + "\n", + " ## geodataframe 
cleaning\n", + " erased_boundary_gdf = erase_boundary_features(boundary, boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd, boundary_landuse_bd, boundary_bus_routes_bd, guf_residential_gdf)\n", + " neighbourhoods = erased_boundary_gdf\n", + " neighbourhoods = drop_large_or_small_areas(neighbourhoods)\n", + "\n", + " neighbourhoods = filter_neighbourhoods_by_roads(neighbourhoods, os_open_roads_clip, 'geometry')\n", + "\n", + " ## create unique IDs\n", + " # simple number based ID\n", + " neighbourhoods['ID'] = range(1, len(neighbourhoods) + 1)\n", + "\n", + " neighbourhoods['geometry'] = neighbourhoods['geometry'].apply(remove_holes)\n", + "\n", + "\n", + " ## filter neighbourhoods to only locations with more than 1 intersection (1 or fewer intersections indicates that all travel modes will be the same)\n", + " # reset neighbourhoods crs\n", + " neighbourhoods = neighbourhoods.to_crs('4326')\n", + "\n", + " # Spatial join to count points within each neighborhood\n", + " spatial_join = gpd.sjoin(neighbourhoods, common_nodes_gdf, how='left', op='contains')\n", + "\n", + " # Group by 'ID' and count the points within each neighborhood\n", + " point_counts = spatial_join.groupby('ID').size().reset_index(name='point_count')\n", + "\n", + " # Filter out neighborhoods with 1 or 0 points\n", + " filtered_neighbourhood_ids = point_counts[point_counts['point_count'] > 1]['ID']\n", + "\n", + " neighbourhoods= neighbourhoods[neighbourhoods['ID'].isin(filtered_neighbourhood_ids)]\n", + "\n", + "\n", + "\n", + " ## we also need to join the length of the streets within the neighbourhood for further analysis\n", + " # Reset index of neighbourhoods\n", + " neighbourhoods = neighbourhoods.reset_index(drop=True)\n", + "\n", + " # reset neighbourhoods crs\n", + " neighbourhoods = neighbourhoods.to_crs('27700')\n", + "\n", + " # Perform a spatial join\n", + " joined_data = gpd.sjoin(os_open_roads_clip, neighbourhoods, how=\"inner\", op=\"intersects\")\n", + "\n", + " # Group by 
neighborhood and calculate total road length\n",
+ "    road_lengths = joined_data.groupby('index_right')['length'].sum().reset_index()\n",
+ "\n",
+ "    # Merge road_lengths with neighbourhoods and drop 'index_right' column\n",
+ "    neighbourhoods = neighbourhoods.merge(road_lengths, left_index=True, right_on='index_right', how='left').drop(columns=['index_right'])\n",
+ "\n",
+ "    # Rename the column\n",
+ "    neighbourhoods.rename(columns={'length': 'road_lengths'}, inplace=True)\n",
+ "\n",
+ "    if return_all == False:\n",
+ "        return neighbourhoods\n",
+ "    else:\n",
+ "        return neighbourhoods, common_nodes_gdf, all_edges, walk_streets, drive_streets, boundary, all_streets, boundary_roads\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def neighbourhood_permeability(place, neighbourhoods, common_nodes_gdf, all_edges, walk_streets, drive_streets):\n",
+ "    ### find permeability\n",
+ "\n",
+ "    ## all to all shortest path calculation\n",
+ "    def calculate_distance_stats_from_points(points_gdf, network):\n",
+ "        all_pairs_shortest_paths = {}\n",
+ "        points_osmids = points_gdf.index.tolist() # Assuming the 'osmid' is the index in the GeoDataFrame\n",
+ "\n",
+ "        for start_node in points_osmids:\n",
+ "            shortest_paths = {}\n",
+ "            try:\n",
+ "                for end_node in points_osmids:\n",
+ "                    if start_node != end_node:\n",
+ "                        distance = nx.shortest_path_length(network, start_node, end_node, weight='length')\n",
+ "                        shortest_paths[end_node] = distance\n",
+ "                all_pairs_shortest_paths[start_node] = shortest_paths\n",
+ "            except nx.NetworkXNoPath:\n",
+ "                # If no path is found, skip adding to all_pairs_shortest_paths\n",
+ "                continue\n",
+ "\n",
+ "        distances = [length for paths in all_pairs_shortest_paths.values() for length in paths.values()]\n",
+ "\n",
+ "        mean_distance = statistics.mean(distances)\n",
+ "        median_distance = statistics.median(distances)\n",
+ "        min_distance = min(distances)\n",
+ "        max_distance = 
max(distances)\n", + " distance_range = max_distance - min_distance\n", + " total_distance = sum(distances)\n", + "\n", + " return {\n", + " \"mean_distance\": mean_distance,\n", + " \"median_distance\": median_distance,\n", + " \"min_distance\": min_distance,\n", + " \"max_distance\": max_distance,\n", + " \"distance_range\": distance_range,\n", + " \"total_distance\": total_distance\n", + " }\n", + "\n", + " ## processing for all to all \n", + " results = []\n", + "\n", + " for index, row in neighbourhoods.iterrows():\n", + " neighbourhood = neighbourhoods.loc[[index]]\n", + "\n", + " ## get neighbourhood boundary and neighbourhood boundary buffer\n", + " # set crs\n", + " neighbourhood = neighbourhood.to_crs('27700')\n", + " # create a buffer neighbourhood\n", + " neighbourhood_buffer = neighbourhood['geometry'].buffer(15)\n", + " # convert back to a geodataframe (for later on)\n", + " neighbourhood_buffer = gpd.GeoDataFrame(geometry=neighbourhood_buffer)\n", + " # reset crs\n", + " neighbourhood, neighbourhood_buffer = neighbourhood.to_crs('4326'), neighbourhood_buffer.to_crs('4326')\n", + "\n", + "\n", + " ## get nodes which can be driven to and walked to within area\n", + " neighbourhood_nodes = gpd.clip(common_nodes_gdf, neighbourhood_buffer)\n", + "\n", + " ## get length of total edges within the neighbourhood\n", + " edges_within_neighbourhood = gpd.sjoin(all_edges, neighbourhood, how=\"inner\", op=\"intersects\")\n", + " total_length = edges_within_neighbourhood['length'].sum()\n", + "\n", + "\n", + " ## calculate neighbourhood distance stats for walking and driving\n", + " walk_stats = calculate_distance_stats_from_points(neighbourhood_nodes, walk_streets)\n", + " drive_stats = calculate_distance_stats_from_points(neighbourhood_nodes, drive_streets)\n", + "\n", + "\n", + " ## Add the statistics to the GeoDataFrame\n", + " neighbourhood['walk_mean_distance'] = walk_stats['mean_distance']\n", + " neighbourhood['walk_median_distance'] = 
walk_stats['median_distance']\n",
+ "        neighbourhood['walk_min_distance'] = walk_stats['min_distance']\n",
+ "        neighbourhood['walk_max_distance'] = walk_stats['max_distance']\n",
+ "        neighbourhood['walk_distance_range'] = walk_stats['distance_range']\n",
+ "        neighbourhood['walk_total_distance'] = walk_stats['total_distance']\n",
+ "\n",
+ "        neighbourhood['drive_mean_distance'] = drive_stats['mean_distance']\n",
+ "        neighbourhood['drive_median_distance'] = drive_stats['median_distance']\n",
+ "        neighbourhood['drive_min_distance'] = drive_stats['min_distance']\n",
+ "        neighbourhood['drive_max_distance'] = drive_stats['max_distance']\n",
+ "        neighbourhood['drive_distance_range'] = drive_stats['distance_range']\n",
+ "        neighbourhood['drive_total_distance'] = drive_stats['total_distance']\n",
+ "\n",
+ "        ## Store statistics along with neighborhood ID or other identifying information\n",
+ "        result = {\n",
+ "            'neighbourhood_id': neighbourhood['ID'].iloc[0], # Assuming you have an ID column\n",
+ "            'walk_mean_distance': walk_stats['mean_distance'],\n",
+ "            'walk_median_distance': walk_stats['median_distance'],\n",
+ "            'walk_total_distance': walk_stats['total_distance'],\n",
+ "            \n",
+ "\n",
+ "            'drive_mean_distance': drive_stats['mean_distance'],\n",
+ "            'drive_median_distance': drive_stats['median_distance'],\n",
+ "            'drive_total_distance': drive_stats['total_distance'],\n",
+ "\n",
+ "            'total_edge_length': total_length\n",
+ "        }\n",
+ "        results.append(result)\n",
+ "\n",
+ "    ## Convert the results to a new dataframe\n",
+ "    results_df = pd.DataFrame(results)\n",
+ "\n",
+ "\n",
+ "    ## calculate differences\n",
+ "\n",
+ "    results_df['mean_distance_diff'] = results_df['walk_mean_distance'] - results_df['drive_mean_distance']\n",
+ "    results_df['median_distance_diff'] = results_df['walk_median_distance'] - results_df['drive_median_distance']\n",
+ "    results_df['total_distance_diff'] = results_df['walk_total_distance'] - results_df['drive_total_distance']\n",
+ "\n",
+ "    
merged_df = pd.merge(neighbourhoods, results_df, left_on = \"ID\", right_on = \"neighbourhood_id\")\n", + " access_results_gdf = gpd.GeoDataFrame(merged_df, geometry='geometry')\n", + "\n", + " return access_results_gdf" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_modal_filters(place, neighbourhoods, boundary, all_streets):\n", + " def get_barriers(boundary):\n", + " \"\"\"\n", + " Find modal filters within a given boundary.\n", + "\n", + " Args:\n", + " boundary (geopandas.GeoDataFrame): A GeoDataFrame representing the boundary.\n", + "\n", + " Returns:\n", + " barriers (geopandas.GeoDataFrame): A GeoDataFrame containing the modal filters.\n", + " streets_gdf (geopandas.GeoDataFrame): A GeoDataFrame containing the streets from OSM.\n", + "\n", + " \"\"\"\n", + "\n", + " # get the boundary in the correct CRS for OSMnx\n", + " boundary_4326 = boundary.to_crs('EPSG:4326')\n", + "\n", + " # get the most \"basic\" filters mapped, the barriers/bollards etc\n", + " # Define tags\n", + " tags = {\"barrier\": [\"bollard\", \"bus_trap\", \"entrance\", \"planter\", \"sump_buster\", \"wedge\"]}\n", + "\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " barriers = ox.features_from_polygon(polygon=boundary_4326.geometry.iloc[0], tags=tags)\n", + "\n", + " # process any linestrings into point geometries\n", + " # Filter the GeoDataFrame to select only rows with \"linestring\" geometry\n", + " barriers_linestrings = barriers[barriers['geometry'].geom_type == 'LineString']\n", + "\n", + " # Create an empty GeoDataFrame to store the individual points\n", + " points_gdf = gpd.GeoDataFrame(columns=list(barriers_linestrings.columns), crs=barriers_linestrings.crs)\n", + "\n", + " # Iterate through each row in the GeoDataFrame with linestrings\n", + " for idx, row in barriers_linestrings.iterrows():\n", + " if isinstance(row['geometry'], LineString):\n", + " # Extract 
the individual points from the linestring\n", + " points = [Point(coord) for coord in list(row['geometry'].coords)]\n", + "\n", + " # Create a GeoDataFrame from the individual points and copy the attributes\n", + " points_df = gpd.GeoDataFrame(geometry=points, crs=barriers_linestrings.crs)\n", + " for col in barriers_linestrings.columns:\n", + " if col != 'geometry':\n", + " points_df[col] = row[col]\n", + "\n", + " # Rename the \"geometry\" column to \"merged_geometry\"\n", + " points_df = points_df.rename(columns={'geometry': 'merged_geometry'})\n", + "\n", + " # Append the points to the points_gdf\n", + " points_gdf = pd.concat([points_gdf, points_df], ignore_index=True)\n", + "\n", + " # Now, points_gdf contains all the individual points from the linestrings with inherited attributes\n", + "\n", + " # Remove the \"geometry\" column from the points GeoDataFrame\n", + " points_gdf = points_gdf.drop(columns=['geometry'])\n", + "\n", + " # Remove the linestring rows from the original GeoDataFrame\n", + " barriers = barriers[barriers['geometry'].geom_type != 'LineString']\n", + "\n", + " # Rename the \"merged_geometry\" column to \"geometry\" in the points GeoDataFrame\n", + " points_gdf = points_gdf.rename(columns={'merged_geometry': 'geometry'})\n", + "\n", + " # Concatenate the individual points GeoDataFrame to the original GeoDataFrame\n", + " barriers = pd.concat([barriers, points_gdf], ignore_index=True)\n", + "\n", + " # Reset the index to ensure it is continuous\n", + " barriers.reset_index(drop=True, inplace=True)\n", + "\n", + " # Create a new column \"previously_linestring\" and set it to False initially\n", + " barriers['previously_linestring'] = False\n", + "\n", + " # Iterate through each row in the GeoDataFrame with linestrings\n", + " for idx, row in barriers_linestrings.iterrows():\n", + " if isinstance(row['geometry'], LineString):\n", + " # Extract the individual points from the linestring\n", + " points = [Point(coord) for coord in 
list(row['geometry'].coords)]\n", + "\n", + " # Iterate through the points in the linestring\n", + " for point in points:\n", + " # Check if the point's geometry intersects with any of the original linestrings\n", + " mask = barriers['geometry'].intersects(point)\n", + " if mask.any():\n", + " # If the point intersects with any linestring, set \"previously_linestring\" to True\n", + " barriers.loc[mask, 'previously_linestring'] = True\n", + "\n", + " # add a unique ID\n", + " barriers['barrier_id'] = range(1, len(barriers) + 1)\n", + "\n", + " # Convert the OSMnx graph to a GeoDataFrame of streets\n", + " streets_gdf = ox.graph_to_gdfs(all_streets, nodes=False, edges=True)\n", + "\n", + " # join the barriers to the streets\n", + " streets_gdf = gpd.sjoin(streets_gdf, barriers, how=\"left\", op=\"intersects\")\n", + "\n", + " # clean geodataframe and drop streets without a barrier\n", + " streets_gdf.columns = streets_gdf.columns.str.replace(\"_right\", \"_barrier\").str.replace(\"_left\", \"_street\")\n", + " # we need to double check the name of \"barrier\"\n", + " streets_gdf['barrier_barrier'] = streets_gdf['barrier'] if 'barrier' in streets_gdf.columns else streets_gdf[\n", + " 'barrier_barrier']\n", + "\n", + " if 'name_street' in streets_gdf.columns:\n", + " streets_gdf = streets_gdf.rename(columns={'name_street': 'name'})\n", + " barrier_streets = streets_gdf.dropna(subset=['barrier_barrier'])\n", + "\n", + " # add barrier tag\n", + " barrier_streets['filter_type'] = 'barrier or bollard'\n", + "\n", + " ## extract points which are on/within 1m of streets only\n", + " streets_gdf['has_barrier'] = 'yes'\n", + "\n", + " # reset crs before spatial join\n", + " barriers, streets_gdf = barriers.to_crs(3857), streets_gdf.to_crs(3857)\n", + "\n", + " barriers = gpd.sjoin_nearest(barriers, streets_gdf, how=\"left\", max_distance=1)\n", + " barriers = barriers.dropna(subset=['has_barrier'])\n", + " barriers = barriers.reset_index(drop=True) # Reset the index\n", + 
"\n", + " # Dissolve based on the 'geometry' column\n", + " barriers = barriers.dissolve(by='barrier_id_right')\n", + "\n", + " # add barrier tag\n", + " barriers['filter_type'] = 'barrier or bollard'\n", + "\n", + " # Reset the index to remove multi-index\n", + " barriers.reset_index(drop=True, inplace=True)\n", + "\n", + " return barriers, streets_gdf\n", + "\n", + "\n", + " def get_bus_gates(streets_gdf):\n", + " \"\"\"\n", + " Finds all the bus gates within the given streets GeoDataFrame.\n", + "\n", + " Parameters:\n", + " streets_gdf (GeoDataFrame): A GeoDataFrame containing street data.\n", + "\n", + " Returns:\n", + " busgates (GeoDataFrame): A GeoDataFrame containing the bus gates found in the streets data.\n", + "\n", + " \"\"\"\n", + "\n", + " # we need to double check the name of \"access\"\n", + " streets_gdf['access_street'] = streets_gdf['access'] if 'access' in streets_gdf.columns else streets_gdf['access_street']\n", + " streets_gdf['bicycle_street'] = streets_gdf['bicycle'] if 'bicycle' in streets_gdf.columns else streets_gdf['bicycle_street']\n", + " streets_gdf['bus'] = streets_gdf['bus_street'] if 'bus_street' in streets_gdf.columns else streets_gdf['bus']\n", + "\n", + " busgates = streets_gdf[((streets_gdf[\"bus\"] == \"yes\") & (streets_gdf[\"access_street\"] == \"no\") & (streets_gdf[\"bicycle_street\"] == \"yes\")) |\n", + " (streets_gdf[\"bus\"] == \"yes\") & (streets_gdf[\"motor_vehicle_street\"] == \"no\") & (streets_gdf[\"bicycle_street\"] == \"yes\")\n", + " ]\n", + "\n", + " # add bus gate tag\n", + " busgates['filter_type'] = 'bus gate'\n", + "\n", + " return busgates, streets_gdf\n", + "\n", + " def get_contraflows(streets_gdf):\n", + " \"\"\"\n", + " Finds the unrestricted one-way streets for cycling but restricted for cars.\n", + "\n", + " Parameters:\n", + " streets_gdf (GeoDataFrame): A GeoDataFrame containing street data.\n", + "\n", + " Returns:\n", + " GeoDataFrame: A GeoDataFrame containing the unrestricted one-way streets 
for cycling.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        # Find one-way streets where cycling is unrestricted but cars are restricted\n",
+ "        oneways = streets_gdf[(streets_gdf[\"oneway\"] == True) & (streets_gdf[\"oneway:bicycle\"] == \"no\")]\n",
+ "\n",
+ "        # Dissolve the roads with the same name to avoid miscounting the total number of oneways\n",
+ "        oneways['name'] = oneways['name'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x))\n",
+ "        oneways = oneways.dissolve(by='name')\n",
+ "\n",
+ "        # Reset the index\n",
+ "        oneways = oneways.reset_index()\n",
+ "\n",
+ "        # Add one-way bike tag\n",
+ "        oneways['filter_type'] = 'one-way bike'\n",
+ "\n",
+ "        return oneways\n",
+ "\n",
+ "\n",
+ "\n",
+ "    def filter_streets_continuations(input_gdf):\n",
+ "        ## clean dataframe\n",
+ "        # Check if 'highway_street' column exists and rename it to 'highway'\n",
+ "        if 'highway_street' in input_gdf.columns:\n",
+ "            input_gdf.rename(columns={'highway_street': 'highway'}, inplace=True)\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "        # filter dataframe \n",
+ "        ## remove indoor roads, these are likely pedestrian only however often don't have any \"cycling\" related tag\n",
+ "        if 'covered' in input_gdf.columns:\n",
+ "            input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'covered' in str(x))]\n",
+ "            input_gdf = input_gdf[input_gdf['covered'] != 'yes']\n",
+ "        ## also remove footways and steps, as these are almost pedestrian only, never cyclable\n",
+ "        input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'footway' in str(x))]\n",
+ "        input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'steps' in str(x))]\n",
+ "\n",
+ "\n",
+ "\n",
+ "        ## clean dataframe\n",
+ "        input_gdf['name'] = input_gdf['name'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x))\n",
+ "        input_gdf['highway'] = input_gdf['highway'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x))\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "        ## perform 
street continuation filtering\n",
+ "        # Grouping by 'name' and checking for groups with 'pedestrian' and another highway type\n",
+ "        grouped = input_gdf.groupby('name').filter(lambda x: any('pedestrian' in val for val in x['highway']) and len(x['highway'].unique()) > 1)\n",
+ "        street_continuations_gdf = grouped[grouped['highway'].str.contains('pedestrian', case=False, na=False)] # Extracting the rows containing 'pedestrian' in the highway column\n",
+ "\n",
+ "        ## deal with nan names\n",
+ "\n",
+ "\n",
+ "        ## dissolve lines that are very very close to each other\n",
+ "        if not street_continuations_gdf.empty:\n",
+ "            street_continuations_gdf = street_continuations_gdf.to_crs('27700')\n",
+ "            street_continuations_gdf['buffer'] = street_continuations_gdf.geometry.buffer(1)\n",
+ "            dissolved = street_continuations_gdf.dissolve(by='name')\n",
+ "            \n",
+ "            # If a MultiPolygon is formed, convert it to individual polygons\n",
+ "            if isinstance(dissolved.geometry.iloc[0], MultiPolygon):\n",
+ "                dissolved = dissolved.explode()\n",
+ "            \n",
+ "            # Remove the buffer column\n",
+ "            dissolved = dissolved.drop(columns='buffer')\n",
+ "            street_continuations_gdf = dissolved.to_crs('4326')\n",
+ "\n",
+ "        return street_continuations_gdf\n",
+ "\n",
+ "\n",
+ "\n",
+ "    barriers, streets_gdf = get_barriers(boundary)\n",
+ "    busgates, streets_gdf = get_bus_gates(streets_gdf)\n",
+ "    oneways = get_contraflows(streets_gdf)\n",
+ "    streets_continuations_gdf = filter_streets_continuations(streets_gdf)\n",
+ "\n",
+ "    # add street continuation tag\n",
+ "    streets_continuations_gdf['filter_type'] = 'street continuation'\n",
+ "\n",
+ "\n",
+ "    ## ensure correct crs\n",
+ "    barriers, busgates, oneways, streets_continuations_gdf = barriers.to_crs('4326'), busgates.to_crs('4326'), oneways.to_crs('4326'), streets_continuations_gdf.to_crs('4326')\n",
+ "\n",
+ "    filters = gpd.GeoDataFrame(pd.concat([barriers, busgates, oneways, streets_continuations_gdf], ignore_index=True))\n",
+ 
"\n", + "\n", + " ## alter neighbourhoods before joining\n", + " # Reset neighbourhood CRS\n", + " filters_results_gdf = neighbourhoods.to_crs('EPSG:27700')\n", + "\n", + " # Buffer to ensure all filters are captured\n", + " filters_results_gdf['geometry'] = filters_results_gdf['geometry'].buffer(5)\n", + "\n", + " # Reset neighbourhood CRS\n", + " filters_results_gdf = filters_results_gdf.to_crs('EPSG:4326')\n", + "\n", + " ## Spatial join\n", + " # Perform a spatial join between neighbourhoods and filters\n", + " joined_data = gpd.sjoin(filters_results_gdf, filters, how=\"left\", predicate=\"intersects\", lsuffix='_neigh', rsuffix='_filt')\n", + "\n", + " # Count the number of each filter within each neighbourhood\n", + " filter_type_counts = joined_data.groupby(['ID', 'filter_type']).size().unstack(fill_value=0)\n", + "\n", + " # Reset the index to make it more readable\n", + " filter_type_counts = filter_type_counts.reset_index()\n", + "\n", + " # Merge the filter_type_counts DataFrame with the neighbourhoods GeoDataFrame on the ID column\n", + " filters_results_gdf = filters_results_gdf.merge(filter_type_counts, on='ID', how='left')\n", + "\n", + " # Define the columns to sum\n", + " columns_to_sum = ['barrier or bollard', 'one-way bike', 'bus gate', 'street continuation']\n", + "\n", + " # Filter out columns that exist in the DataFrame\n", + " existing_columns = [col for col in columns_to_sum if col in filters_results_gdf.columns]\n", + "\n", + " # Sum the values in the existing columns per row\n", + " filters_results_gdf['total_filter_types'] = filters_results_gdf[existing_columns].sum(axis=1)\n", + "\n", + " # Fill NaN values with 0 if necessary\n", + " filters_results_gdf = filters_results_gdf.fillna(0)\n", + "\n", + " # Find locations where filters are found dense\n", + " # Convert road density to numeric if not already\n", + " filters_results_gdf['road_density'] = pd.to_numeric(filters_results_gdf['road_density'], errors='coerce')\n", + "\n", + " # 
Create new column to hold filters * density value\n", + " filters_results_gdf['filter_road_density'] = filters_results_gdf['total_filter_types'] * filters_results_gdf['road_density']\n", + "\n", + " return filters_results_gdf, filters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_rat_runs(place, neighbourhoods):\n", + " #### rat runs\n", + "\n", + "\n", + " drive_g = ox.graph_from_place(place, network_type='drive', simplify=True)\n", + "\n", + " ## Clean graph and calculate travel times along edges\n", + "\n", + " # Function to clean 'maxspeed' values\n", + " def clean_maxspeed(maxspeed):\n", + " if maxspeed is None:\n", + " return 30 # Replace None with a default value of 30\n", + " elif isinstance(maxspeed, str) and ' mph' in maxspeed:\n", + " return float(maxspeed.replace(' mph', ''))\n", + " elif isinstance(maxspeed, list): # Handle cases where 'maxspeed' is a list\n", + " return [float(speed.replace(' mph', '')) for speed in maxspeed]\n", + " else:\n", + " return maxspeed\n", + "\n", + " # Apply the function to 'maxspeed' in each edge attribute\n", + " for u, v, key, data in drive_g.edges(keys=True, data=True):\n", + " if 'maxspeed' in data:\n", + " data['maxspeed'] = clean_maxspeed(data['maxspeed'])\n", + " else:\n", + " data['maxspeed'] = 30 # Assign default value of 30 if 'maxspeed' is missing\n", + "\n", + " # Function to convert 'maxspeed' to a numeric value\n", + " def convert_maxspeed(maxspeed):\n", + " if isinstance(maxspeed, list) and maxspeed: # Check if 'maxspeed' is a non-empty list\n", + " # If 'maxspeed' is a list, convert the first value to a numeric value\n", + " return convert_single_maxspeed(maxspeed[0])\n", + " else:\n", + " # If 'maxspeed' is not a list or an empty list, convert the single value to a numeric value\n", + " return convert_single_maxspeed(maxspeed)\n", + "\n", + " # Helper function to convert a single maxspeed value to a numeric value\n", + " def 
convert_single_maxspeed(maxspeed):\n", + " if maxspeed is None:\n", + " return 30 # Replace None with a default value of 30\n", + "\n", + " if isinstance(maxspeed, str):\n", + " # Extract numeric part of the string using regular expression\n", + " numeric_part = ''.join(c for c in maxspeed if c.isdigit() or c == '.')\n", + " return float(numeric_part) if numeric_part else 30 # Default value if no numeric part found\n", + " elif isinstance(maxspeed, (int, float)):\n", + " return maxspeed\n", + " else:\n", + " return 30 # Default value if the type is unknown\n", + "\n", + " # Function to calculate travel time\n", + " def calculate_travel_time(length, maxspeed):\n", + " # Convert 'maxspeed' to a numeric value\n", + " maxspeed_value = convert_maxspeed(maxspeed)\n", + "\n", + " # Convert maxspeed to meters per second\n", + " speed_mps = maxspeed_value * 0.44704 # 1 mph = 0.44704 m/s\n", + "\n", + " # Calculate travel time in seconds using the formula: time = distance/speed\n", + " if length is not None and speed_mps > 0:\n", + " travel_time = length / speed_mps\n", + " return travel_time\n", + " else:\n", + " return None\n", + "\n", + " # Apply the function to 'length' and 'maxspeed' in each edge attribute\n", + " for u, v, key, data in drive_g.edges(keys=True, data=True):\n", + " if 'length' in data:\n", + " data['travel_time'] = calculate_travel_time(data.get('length'), data.get('maxspeed'))\n", + "\n", + "\n", + "\n", + " def get_sparse_graph(drive_g):\n", + " \"\"\"\n", + " Create a sparse graph from bounding roads.\n", + "\n", + " Args:\n", + " drive_g (networkx.Graph): The original graph.\n", + "\n", + " Returns:\n", + " networkx.Graph: The sparse graph.\n", + " \"\"\"\n", + " # Create a copy of the original graph\n", + " sparse_drive_g = drive_g.copy()\n", + "\n", + " # Define the conditions for keeping edges\n", + " conditions = [\n", + " (\n", + " data.get('highway') in ['trunk', 'trunk_link', 'motorway', 'motorway_link', 'primary', 'primary_link',\n", + " 
'secondary', 'secondary_link', 'tertiary', 'tertiary_link']\n", + " ) or (\n", + " data.get('maxspeed') in ['60', '70', '40', ('20', '50'), ('30', '60'), ('30', '50'), ('70', '50'),\n", + " ('40', '60'), ('70', '60'), ('60', '40'), ('50', '40'), ('30', '40'),\n", + " ('20', '60'), ('70 ', '40 '), ('30 ', '70')]\n", + " )\n", + " for u, v, k, data in sparse_drive_g.edges(keys=True, data=True)\n", + " ]\n", + "\n", + " # Keep only the edges that satisfy the conditions\n", + " edges_to_remove = [\n", + " (u, v, k) for (u, v, k), condition in zip(sparse_drive_g.edges(keys=True), conditions) if not condition\n", + " ]\n", + " sparse_drive_g.remove_edges_from(edges_to_remove)\n", + "\n", + " # Clean nodes by removing isolated nodes from the graph\n", + " isolated_nodes = list(nx.isolates(sparse_drive_g))\n", + " sparse_drive_g.remove_nodes_from(isolated_nodes)\n", + "\n", + " return sparse_drive_g\n", + "\n", + "\n", + "\n", + " sparse_drive_g = get_sparse_graph(drive_g)\n", + "\n", + "\n", + " #print(\"Number of edges in the sparse graph:\", sparse_drive_g.number_of_edges())\n", + "\n", + "\n", + "\n", + " ## create a partitioned network (using the full graph and the sparse graph)\n", + "\n", + " # Make a copy of the original graph\n", + " drive_g_copy = drive_g.copy()\n", + "\n", + " ## Remove edges \n", + " drive_g_copy.remove_edges_from(sparse_drive_g.edges)\n", + "\n", + " ## Remove nodes\n", + " # Convert nodes to strings\n", + " sparse_drive_nodes_str = [str(node) for node in sparse_drive_g.nodes]\n", + " drive_g_copy.remove_nodes_from(sparse_drive_nodes_str)\n", + "\n", + " # clean nodes by removing isolated nodes from the graph\n", + " isolated_nodes = list(nx.isolates(drive_g_copy))\n", + " drive_g_copy.remove_nodes_from(isolated_nodes)\n", + "\n", + " len(drive_g_copy)\n", + "\n", + "\n", + "\n", + "\n", + " ## partition the full graph, by removing the sparse graph from it.\n", + "\n", + " # first nodes shared between sparse_drive_g and drive_g (these nodes 
    # are the connection between neighbourhoods and boundary roads)
    shared_nodes = set(sparse_drive_g.nodes).intersection(drive_g_copy.nodes)

    # we then need to remove nodes where junctions between two neighbourhood nodes and sparse graphs are present.
    # we do this by adding new nodes the end of edges which intersect with the sparse graph, to split these junctions up
    # Initialize a counter to generate unique indices for new nodes
    node_counter = Counter()
    # Iterate through shared nodes
    for shared_node in shared_nodes:
        # Find edges in drive_g connected to the shared node
        drive_g_edges = list(drive_g_copy.edges(shared_node, data=True, keys=True))

        # Find edges in sparse_drive_g connected to the shared node
        sparse_drive_g_edges = list(sparse_drive_g.edges(shared_node, data=True, keys=True))

        # Iterate through edges in drive_g connected to the shared node
        for u, v, key, data in drive_g_edges:
            # Check if the corresponding edge is not in sparse_drive_g
            # NOTE(review): sparse_drive_g_edges holds 4-tuples (u, v, key, data),
            # so this 3-tuple membership test is always True — confirm intended.
            if (u, v, key) not in sparse_drive_g_edges:
                # Create new end nodes for the edge in drive_g
                new_u = f"new_{u}" if u == shared_node else u
                new_v = f"new_{v}" if v == shared_node else v

                # Generate a unique index for each new node ID
                new_u_id = f"{new_u}_{key}_{node_counter[new_u]}" if new_u != u else new_u
                new_v_id = f"{new_v}_{key}_{node_counter[new_v]}" if new_v != v else new_v

                # Increment the counter for each new node
                node_counter[new_u] += 1
                node_counter[new_v] += 1

                # Add new nodes and update the edge (attributes copied from the originals)
                drive_g_copy.add_node(new_u_id, **drive_g_copy.nodes[u])
                drive_g_copy.add_node(new_v_id, **drive_g_copy.nodes[v])

                drive_g_copy.add_edge(new_u_id, new_v_id, key=key, **data)

                # Check if the reverse edge already exists in drive_g_copy
                if not drive_g_copy.has_edge(new_v_id, new_u_id, key):
                    # Create the reverse edge with new nodes
                    drive_g_copy.add_edge(new_v_id, new_u_id, key=key, **data)

                # Disconnect the shared node from the new edge
                drive_g_copy.remove_edge(u, v, key)

        # Remove the shared node
        drive_g_copy.remove_node(shared_node)

    # Find strongly connected components in the modified drive_g graph
    # (each SCC is treated as a candidate neighbourhood)
    drive_g_scc = list(nx.strongly_connected_components(drive_g_copy))

    # Create a color mapping for edges in each strongly connected component using random colors
    edge_colors = {}
    for i, component in enumerate(drive_g_scc):
        color = (random.random(), random.random(), random.random())  # RGB tuple with random values
        for edge in drive_g_copy.edges:
            if edge[0] in component and edge[1] in component:
                edge_colors[edge] = color

    # Plot the graph with edge colors and without nodes
    #fig, ax = ox.plot_graph(drive_g_copy, edge_color=[edge_colors.get(edge, (0, 0, 0)) for edge in drive_g_copy.edges], node_size=0, show=False, close=False, figsize=(20, 20))
    #ox.plot_graph(sparse_drive_g, ax=ax, edge_color='red', edge_linewidth=2, node_size=0, show=True)
    #fig.show()

    ## add ssc index to each neighbourhood

    # Create a mapping from nodes to their SCC index
    node_scc_mapping = {node: i for i, scc in enumerate(drive_g_scc) for node in scc}

    # Add SCC attribute to edges
    for u, v, key, data in drive_g_copy.edges(keys=True, data=True):
        scc_index_u = node_scc_mapping.get(u, None)
        scc_index_v = node_scc_mapping.get(v, None)

        # Add the SCC index as an attribute to the edge
        drive_g_copy[u][v][key]['scc_index'] = scc_index_u if scc_index_u is not None else scc_index_v

    ## join neighbourhood mapping to orignial driving graph

    # Add SCC index attribute to drive_g (split "new_*" nodes won't match here and fall back to None)
    for u, v, key, data in drive_g.edges(keys=True, data=True):
        scc_index_u = node_scc_mapping.get(u, None)
        scc_index_v = node_scc_mapping.get(v, None)

        # Add the SCC index as an attribute to the edge
        drive_g[u][v][key]['scc_index'] = scc_index_u if scc_index_u is not None else scc_index_v

    ## get random nodes

    # Function to get random nodes present in both graphs for each node
    def get_random_nodes_for_each(graph1, graph2):
        # For every node common to both graphs, pick `num_nodes` partner nodes:
        # common neighbours first, topped up with random other common nodes.
        # NOTE(review): reads the module/global `iterations` as the cap — confirm
        # it is defined before this function runs.
        random_nodes_for_each = {}
        common_nodes = set(graph1.nodes()) & set(graph2.nodes())
        total_common_nodes = len(common_nodes)
        num_nodes = min(iterations, max(1, int(total_common_nodes * 0.9)))  # 10% less than the total number of common nodes, capped at the input max iterations

        for node in common_nodes:
            neighbors = list(set(graph1.neighbors(node)) & set(graph2.neighbors(node)))
            if len(neighbors) >= num_nodes:
                random_neighbors = random.sample(neighbors, num_nodes)
            else:
                random_neighbors = neighbors + random.sample(list(common_nodes - set(neighbors)), num_nodes - len(neighbors))
            random_nodes_for_each[node] = random_neighbors
        return random_nodes_for_each

    # Get random nodes for each common node
    random_nodes_for_each = get_random_nodes_for_each(drive_g, sparse_drive_g)

    # Print random nodes for each common node
    #for node, random_neighbors in random_nodes_for_each.items():
        #print(f"Random nodes for node {node}: {random_neighbors}")

    ## Find shortest paths

    # Convert the dictionary of nodes into a list of (origin, destination) tuples
    nodes_list = [(key, value) for key, values in random_nodes_for_each.items() for value in
    # (continuation — tail of get_rat_runs(); its `def` lies above this span)
    # Perform spatial join between neighbourhoods and rat_run_edges
    # NOTE(review): `op=` is deprecated in geopandas >= 0.10 (renamed `predicate=`)
    # — confirm the pinned geopandas version before changing.
    join_result = gpd.sjoin(neighbourhoods, rat_run_edges, how='left', op='intersects')

    # Group by neighbourhood index and count the number of rat_run edges in each
    rat_run_edge_count = join_result.groupby(join_result.index)['ID'].count().reset_index(name='rat_run_edge_count')

    # Group by neighbourhood index and count the number of rat_run edges in each
    # NOTE(review): exact duplicate of the statement above — redundant but harmless.
    rat_run_edge_count = join_result.groupby(join_result.index)['ID'].count().reset_index(name='rat_run_edge_count')

    # reset crs
    # NOTE(review): also redundant — the CRS was already set to 27700 just before the sjoin.
    neighbourhoods = neighbourhoods.to_crs(27700)

    # Join rat_run_edge_count with neighbourhoods based on index
    neighbourhoods = neighbourhoods.join(rat_run_edge_count.set_index('index'))

    return neighbourhoods, rat_run_edges, drive_gdf_edges


def score_neighbourhoods(filters_results_gdf, access_results_gdf, neighbourhoods):
    """
    Combine the per-neighbourhood metric GeoDataFrames (modal filters, access/
    permeability, rat runs) into one frame and convert each metric to a 0-100
    score. Reads the globals through_route_weighting, permiablity_weighting and
    modal_filter_weighting for the weighted overall score.
    """
    ## join all together
    results_gdf = gpd.GeoDataFrame(filters_results_gdf.merge(access_results_gdf, on="ID", suffixes=('_filters', "_access")))
    results_gdf = results_gdf.set_geometry('geometry_access')
    final_results_gdf = results_gdf.merge(neighbourhoods[['ID', 'rat_run_edge_count']], on='ID', how='left')
    final_results_gdf['geometry'] = final_results_gdf['geometry_filters']
    final_results_gdf = final_results_gdf.set_geometry('geometry')
    final_results_gdf.drop(columns=['geometry_filters', 'geometry_access'], inplace=True)

    # Define the scoring function for "rat_run_edge_count"
    def score_rat_run_edge_count(value):
        # <=1 rat-run edge scores full marks; beyond that the score halves per edge.
        if value <= 1:
            return 100
        else:
            return 100 / (2 ** value)  # Exponetial scoring

    # Apply the scoring function to the "rat_run_edge_count" column
    final_results_gdf["rat_run_score"] = final_results_gdf["rat_run_edge_count"].apply(score_rat_run_edge_count)

    import math  # NOTE(review): redundant — math is already imported at the top of the file

    def score_mean_distance_diff(value):
        # Negative values (walking shorter than driving) score higher, saturating at -750 m.
        if value >= 0:
            return 0
        elif value <= -750:  # set a 750m cut off
            return 100
        else:
            normalized_value = abs(value) / 750  # Normalize the value between 0 and 1
            score = 100 * (1 - math.exp(-5 * normalized_value))  # Exponential increase
            return score

    # Apply the modified scoring function to the "mean_distance_diff" column
    final_results_gdf["mean_distance_diff_score"] = final_results_gdf["mean_distance_diff"].apply(score_mean_distance_diff)

    def score_road_density_filters(value):
        # Linear 0-100 score, saturating at a density of 40.
        if value <= 0:
            return 0
        elif value >= 40:
            return 100
        else:
            return (value / 40) * 100

    # Apply the scoring function to the "road_density_filters" column
    final_results_gdf["filter_road_density_score"] = final_results_gdf["filter_road_density"].apply(score_road_density_filters)

    # Create the "scored_neighbourhoods" geodataframe with the necessary columns
    scored_neighbourhoods = final_results_gdf[["geometry", "rat_run_score", "mean_distance_diff_score", "filter_road_density_score"]]

    # Calculate overall score
    # NOTE(review): dead code — this unweighted mean is overwritten by the
    # weighted calculation below.
    scored_neighbourhoods["overall_score"] = (scored_neighbourhoods["rat_run_score"] + scored_neighbourhoods["mean_distance_diff_score"] + scored_neighbourhoods["filter_road_density_score"]) / 3

    # Define weights for each score
    # NOTE(review): these literals are immediately overwritten by the globals below — dead assignments.
    weight_rat_run_score = 1
    weight_mean_distance_diff_score = 0.25
    weight_road_density_filters_score = 0.75

    weight_rat_run_score = through_route_weighting
    weight_mean_distance_diff_score = permiablity_weighting
    weight_road_density_filters_score = modal_filter_weighting

    # Calculate overall score with weights
    scored_neighbourhoods["overall_score"] = (
        (weight_rat_run_score * scored_neighbourhoods["rat_run_score"]) +
        (weight_mean_distance_diff_score * scored_neighbourhoods["mean_distance_diff_score"]) +
        (weight_road_density_filters_score * scored_neighbourhoods["filter_road_density_score"])
    ) / (weight_rat_run_score + weight_mean_distance_diff_score + weight_road_density_filters_score)

    ## find elbow point for k-means clustering

    # Selecting the features for clustering
    X = scored_neighbourhoods[["rat_run_score", "mean_distance_diff_score", "filter_road_density_score"]]

    # Initialize a list to store the within-cluster sum of squares (WCSS) for different values of K
    wcss = []

    # Define the range of K values to try
    k_values = range(1, 11)

    # Calculate WCSS for each value of K
    for k in k_values:
        kmeans = KMeans(n_clusters=k, random_state=42)
        kmeans.fit(X)
        wcss.append(kmeans.inertia_)

    # Plotting the elbow curve (diagnostic only — k is fixed to 2 below)
    plt.plot(k_values, wcss, marker='o')
    plt.title('Elbow Method')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('WCSS')
    plt.xticks(k_values)
    plt.show()

    ## Run k-means clustering
    # Define the number of clusters
    k = 2

    # Select the features for clustering
    features = ["rat_run_score", "mean_distance_diff_score", "filter_road_density_score"]

    # Extract the features from the dataframe
    X = scored_neighbourhoods[features]

    # Initialize the KMeans model
    kmeans = KMeans(n_clusters=k, random_state=42)

    # Fit the model to the data
    kmeans.fit(X)

    # Get the cluster labels
    cluster_labels = kmeans.labels_

    # Add
    # the cluster labels to the dataframe
    scored_neighbourhoods["cluster_label"] = cluster_labels

    return scored_neighbourhoods


def create_webmaps(filters, scored_neighbourhoods, rat_run_edges, boundary_roads, drive_gdf_edges):
    """
    Build a folium web map of scored neighbourhoods, rat runs, busy roads and
    modal filters, save it to HTML, and export the layers to GeoPackages.

    NOTE(review): also reads the module-level global `place` (not a parameter)
    for file naming and the final print — confirm it is set by the driver cell.
    Mutates `filters`, `scored_neighbourhoods` and `rat_run_edges` in place
    (columns cast to str for export).
    """
    ## maps
    ## adjust geodataframe contents for plotting purposes

    replacement_map = {
        'barrier or bollard': 'Barrier or Bollard',
        'bus gate': 'Bus Gate',
        'one-way bike': 'Cycle Contraflow',
        'street continuation': 'Street Continuation'
    }

    # Replace filter types in the DataFrame
    filters['filter_type'] = filters['filter_type'].map(replacement_map).fillna(filters['filter_type'])

    import folium
    import branca.colormap as cm
    from folium.plugins import MarkerCluster  # NOTE(review): imported but unused in this span

    # Calculate the centroid of the scored_neighbourhoods GeoDataFrame
    # (used only to centre the initial map view)
    centroid = scored_neighbourhoods.geometry.centroid.iloc[0]
    center_latitude, center_longitude = centroid.y, centroid.x

    # Create a Folium map centered around the centroid of scored_neighbourhoods
    m = folium.Map(location=[center_latitude, center_longitude], zoom_start=12)

    # Define the colormap using cm.linear.viridis
    cmap = cm.linear.viridis.scale(scored_neighbourhoods['overall_score'].min(), scored_neighbourhoods['overall_score'].max())

    # Plot scored_neighbourhoods using the Viridis colormap
    folium.GeoJson(scored_neighbourhoods,
                   name="Scored Neighbourhoods",
                   style_function=lambda x: {'fillColor': cmap(x['properties']['overall_score']),
                                             'color': cmap(x['properties']['overall_score']),
                                             'weight': 1, 'fillOpacity': 0.7},
                   tooltip=folium.features.GeoJsonTooltip(
                       fields=['rat_run_score', 'mean_distance_diff_score', 'filter_road_density_score', 'overall_score', 'cluster_label'],
                       aliases=['Rat Run Score', 'Mean Distance Diff Score', 'Filter Road Density Score', 'Overall Score', 'Cluster Label'])
                   ).add_to(m)

    # Plot streets_gdf on the map with default blue color and slightly transparent
    streets_layer = folium.GeoJson(drive_gdf_edges,
                                   name="Streets",
                                   style_function=lambda x: {'color': 'lightgreen', 'weight': 1, 'fillOpacity': 0.7}
                                   ).add_to(m)

    # Plot rat_run_edges on the map with red color
    rat_runs_layer = folium.GeoJson(rat_run_edges,
                                    name="Rat Runs",
                                    style_function=lambda x: {'color': 'red', 'weight': 1.5, 'fillOpacity': 0.7}
                                    ).add_to(m)

    # Plot boundary_roads on the map with orange color and thicker weight
    boundary_roads_layer = folium.GeoJson(boundary_roads,
                                          name="Busy Roads",
                                          style_function=lambda x: {'color': 'orange', 'weight': 3, 'fillOpacity': 0.7}
                                          ).add_to(m)

    # Create a feature group for each type of layer
    point_group = folium.FeatureGroup(name='Modal Filtering Points', show=True)
    line_group = folium.FeatureGroup(name='Modal Filtering Streets', show=True)

    # Plot purple point markers for filters with tooltips
    # NOTE(review): `.type` on shapely geometries is deprecated in shapely 2
    # (use `.geom_type`) — confirm pinned version.
    for _, row in filters.iterrows():
        if row.geometry.type == 'Point':
            tooltip_text = f"Filter type: {row['filter_type']}"  # Concatenating "Filter type:" with the 'filter_type' value
            folium.CircleMarker(location=[row.geometry.y, row.geometry.x], radius=2, color='purple', fill=True, fill_color='purple', tooltip=tooltip_text).add_to(point_group)
        elif row.geometry.type == 'MultiLineString' or row.geometry.type == 'LineString':
            tooltip_text = f"Filter type: {row['filter_type']}"  # Concatenating "Filter type:" with the 'filter_type' value
            folium.GeoJson(row.geometry, style_function=lambda x: {'color': 'purple', 'weight': 1.5, 'fillOpacity': 0.7}, tooltip=tooltip_text).add_to(line_group)

    # Add layer groups to the map
    point_group.add_to(m)
    line_group.add_to(m)

    # Add layer control
    folium.LayerControl(autoZIndex=True).add_to(m)

    cmap.caption = 'LTN Plausiblity Scores (Possible range: 0-100)'
    cmap.add_to(m)

    # add text
    from folium import IFrame  # NOTE(review): imported but unused in this span

    # Define the HTML content for the text
    # NOTE(review): the HTML markup between the triple quotes was mangled in this
    # copy of the file; the visible text is preserved below — confirm the original
    # tags against the notebook source.
    html_text = """
    <div>
    Scored neighbourhoods show a LTN 'Plausibility' score which incorporates metrics based on the presence of rat-runs, modal filters and measures of neighbourhood permeability. Map results are experimental, and should be treated as such. Get in touch via c.larkin@newcastle.ac.uk or https://github.com/Froguin99/LTN-Detection.
    </div>
    """

    # Add the HTML content to the map
    folium.MacroElement().add_to(m)  # NOTE(review): adds an empty element — appears to be a no-op
    m.get_root().html.add_child(folium.Element(html_text))

    # save to geopackage

    # Extract place name without ", United Kingdom"
    place_name = place.replace(", United Kingdom", "").strip()

    # Create the file paths
    map_file_path = os.path.join(r'C:\Users\b8008458\OneDrive - Newcastle University\2022 to 2023\PhD\ltnDetection\LTN-Detection\Examples\maps', f'{place_name}_example.html')
    geopackage_file_path = os.path.join(r'C:\Users\b8008458\OneDrive - Newcastle University\2022 to 2023\PhD\ltnDetection\LTN-Detection\data\scored_neighbourhoods', f'scored_neighbourhoods_{place_name}.gpkg')

    # Export map
    m.save(map_file_path)

    # Send to geopackage
    geometry_column = scored_neighbourhoods.geometry.name

    # Iterate through the columns and convert them to strings
    # (GPKG drivers can reject mixed/object dtypes; stringify everything but geometry)
    for column in scored_neighbourhoods.columns:
        if column != geometry_column:
            scored_neighbourhoods[column] = scored_neighbourhoods[column].astype(str)

    scored_neighbourhoods.to_file(geopackage_file_path, driver="GPKG")

    ## export rat runs
    geopackage_file_path = os.path.join(r'C:\Users\b8008458\OneDrive - Newcastle University\2022 to 2023\PhD\ltnDetection\LTN-Detection\data\rat_runs', f'rat_runs_{place_name}.gpkg')

    # Send to geopackage
    geometry_column = rat_run_edges.geometry.name

    # Iterate through the columns and convert them to strings
    for column in rat_run_edges.columns:
        if column != geometry_column:
            rat_run_edges[column] = rat_run_edges[column].astype(str)

    rat_run_edges.to_file(geopackage_file_path, driver="GPKG")

    ## export modal filters
    geopackage_file_path = os.path.join(r'C:\Users\b8008458\OneDrive - Newcastle University\2022 to 2023\PhD\ltnDetection\LTN-Detection\data\filters', f'filters_{place_name}.gpkg')

    # Send to geopackage
    geometry_column = filters.geometry.name

    # Iterate through the columns and convert them to strings
    for column in filters.columns:
        if column != geometry_column:
            filters[column] = filters[column].astype(str)

    filters.to_file(geopackage_file_path, driver="GPKG")

    # Display the map
    #m

    print("Finished", place)

    return m


# notebook driver cell: run the full pipeline for one place
# NOTE(review): define_neighbourhoods, neighbourhood_permeability,
# get_modal_filters and get_rat_runs are defined in other cells of the notebook.

# default is 1000
iterations = 1000

# default values are 1, 0.25, 0.75
through_route_weighting = 1
permiablity_weighting = 0.25
modal_filter_weighting = 0.75

# set place
place = "City of Chester, United Kingdom"

neighbourhoods, common_nodes_gdf, all_edges, walk_streets, drive_streets, boundary, all_streets, boundary_roads = define_neighbourhoods(place, return_all = True)
permiablity_metrics = neighbourhood_permeability(place, neighbourhoods, common_nodes_gdf, all_edges, walk_streets, drive_streets)
modal_filter_metrics, modal_filters = get_modal_filters(place, neighbourhoods, boundary, all_streets)
rat_run_metrics, rat_runs, drive_edges_gdf = get_rat_runs(place, neighbourhoods)
scored_neighbourhoods = score_neighbourhoods(modal_filter_metrics, permiablity_metrics, rat_run_metrics)
maps = create_webmaps(modal_filters, scored_neighbourhoods, rat_runs, boundary_roads, drive_edges_gdf)

# display map
#m

# (remainder of this span is notebook JSON scaffolding — kernelspec metadata — not code)
{ + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Linux/ltn_detection_linux.py b/Linux/ltn_detection_linux.py new file mode 100644 index 0000000..4c9782e --- /dev/null +++ b/Linux/ltn_detection_linux.py @@ -0,0 +1,2048 @@ +# %% +### set up python +## Library imports +import geopandas as gpd +import osmnx as ox +import networkx as nx +import momepy +import matplotlib.pyplot as plt +import folium +import pandas as pd +import overpy +from shapely.geometry import LineString +from shapely.geometry import Point +import requests +from shapely.geometry import MultiPolygon +from shapely.geometry import Polygon +import statistics +from shapely.ops import unary_union +import random +import overpy +import os +import math +from itertools import count +from collections import Counter +from sklearn.cluster import KMeans +from osmnx._errors import InsufficientResponseError +from owslib.wms import WebMapService +from rasterio.mask import mask as rio_mask +from rasterio.features import shapes +from shapely.geometry import shape, mapping +from rasterio.io import MemoryFile +import numpy as np +from shapely.ops import unary_union +import warnings +from shapely.errors import ShapelyDeprecationWarning + + + +## Mute warnings +warnings.simplefilter(action='ignore', category=FutureWarning) +warnings.simplefilter(action='ignore', category=pd.errors.SettingWithCopyWarning) +warnings.simplefilter(action='ignore', category=ShapelyDeprecationWarning) +warnings.simplefilter(action='ignore', category=UserWarning) + + + +## Update settings +# update osmnx settings +useful_tags_ways = ox.settings.useful_tags_way + ['cycleway'] + ['bicycle'] + ['motor_vehicle'] + ['railway'] + ['tunnel'] + ['barrier'] + ['bus'] + ['access'] + ['oneway'] + ['oneway:bicycle'] 
+ ['covered'] + ['waterway'] +ox.config(use_cache=True, + log_console=True, + useful_tags_way=useful_tags_ways + ) + + + +# %% +# Define the path to your text file +file_path = '/home/chrisl/output.txt' + +# Initialize an empty list to store the lines +places = [] + +# Open the file and read each line +with open(file_path, 'r') as file: + for line in file: + # Strip the newline character and any surrounding whitespace + place = line.strip() + # Append the line to the list + places.append(place) + +# Print the list of places +print("List of places read from input file") +for place in places: + print(place) + +print("Getting OS Roads...") +# %% +def get_OS_roads(): + """ + Reads in OS Open Road data from a GeoPackage file. + + Returns: + os_open_roads (GeoDataFrame): A GeoDataFrame containing road data. + """ + os_open_roads = gpd.read_file("/home/chrisl/data/oproad_roads_only.gpkg") + return os_open_roads + +os_open_roads = get_OS_roads() +print("OS Roads read") + + + +# %% +def define_neighbourhoods(place, return_all): + # get boundary + def set_location_boundary(place): + """ + Sets up the location boundary by geocoding the given place and buffering it. + + Parameters: + place (str): The name or address of the place to geocode. + + Returns: + geopandas.GeoDataFrame: The buffered boundary of the location. + """ + # Set location and get boundary + boundary = ox.geocode_to_gdf(place) + boundary = boundary.to_crs('EPSG:27700') + + # Buffer boundary to ensure clips include riverlines which may act as borders between geographies + boundary_buffered = boundary.buffer(50) + + return boundary_buffered, boundary + + boundary_buffered, boundary = set_location_boundary(place) + + + + + """ + This code retrieves street nodes and edges for walking and driving from OpenStreetMap within our area boundary, and loads the OS Open Roads network dataset. + + Functions: + - get_street_networks: Retrieves street networks for all, walking, and driving modes within the specified boundary. 
+ """ + + def get_OSM_street_networks(boundary_buffered): + """ + Retrieves street networks for all, walking, and driving modes within the specified boundary. + + Parameters: + - boundary_buffered: A GeoDataFrame representing the boundary of the area of interest. + + Returns: + - all_edges: A GeoDataFrame containing the edges (streets) of the entire street network. + - all_nodes: A GeoDataFrame containing the nodes (intersections) of the entire street network. + - walk_edges: A GeoDataFrame containing the edges (streets) of the walking street network. + - walk_nodes: A GeoDataFrame containing the nodes (intersections) of the walking street network. + - drive_edges: A GeoDataFrame containing the edges (streets) of the driving street network. + - drive_nodes: A GeoDataFrame containing the nodes (intersections) of the driving street network. + - common_nodes_gdf: A GeoDataFrame containing the common nodes between the driving and walking street networks. + """ + + # Reset boundary_buffered crs for passing to OSM + boundary_buffered_4326 = boundary_buffered.to_crs('4326') + + # Get street networks + all_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='all', simplify=False) + walk_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='walk', simplify=True) + drive_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='drive', simplify=False) + + all_edges = ox.graph_to_gdfs(all_streets, nodes=False, edges=True) + all_nodes = ox.graph_to_gdfs(all_streets, nodes=True, edges=False) + + walk_edges = ox.graph_to_gdfs(walk_streets, nodes=False, edges=True) + walk_nodes = ox.graph_to_gdfs(walk_streets, nodes=True, edges=False) + + drive_edges = ox.graph_to_gdfs(drive_streets, nodes=False, edges=True) + drive_nodes = ox.graph_to_gdfs(drive_streets, nodes=True, edges=False) + + # Find the common nodes between networks + # This ensures that shortest paths between points should 
always be able to be calculated + common_nodes = drive_nodes.merge(walk_nodes, on='osmid', suffixes=('_drive', '_walk')) + common_nodes_gdf = gpd.GeoDataFrame(common_nodes, geometry='geometry_drive') + + return all_edges, all_nodes, walk_edges, walk_nodes, drive_edges, drive_nodes, common_nodes_gdf, all_streets, walk_streets, drive_streets + + + # get street networks + all_edges, all_nodes, walk_edges, walk_nodes, drive_edges, drive_nodes, common_nodes_gdf, all_streets, walk_streets, drive_streets = get_OSM_street_networks(boundary_buffered) + #os_open_roads = get_OS_roads() this is now got at the start of the code to avoid re-reading + + + def retrieve_osm_features(polygon, tags): + """ + Retrieves OpenStreetMap features based on the specified polygon and tags. + + Args: + polygon (Polygon): The polygon to retrieve features within. + tags (dict): The tags to filter the features. + + Returns: + GeoDataFrame: The retrieved OpenStreetMap features. + """ + try: + features = ox.features_from_polygon(polygon=polygon, tags=tags) + except Exception as e: + error_message = str(e) + if "There are no data elements in the server response" in error_message: + print("No data elements found for the specified location/tags.") + features = gpd.GeoDataFrame() # Create an empty GeoDataFrame + else: + # Handle other exceptions here if needed + print("An error occurred:", error_message) + features = None + return features + + + def get_railways(place): + """ + This retrievies and processes OpenStreetMap (OSM) railways data for a specified place. + + Args: + place (str): The name of the place to retrieve OSM features for. + + Returns: + railways (geopandas.GeoDataFrame): A GeoDataFrame containing the railways within the specified place. 
+ """ + + # for unknown reasons, using rail = ox.graph_from_place(place, custom_filter='["railway"]') + # doesn't ALWAYS retrive the full rail network, hence why multiple lines are used to achive the same result + + # Define railway types to retrieve + railway_types = ["", "rail", "light_rail", "narrow_gauge", "subway", "tram"] + + # Initialize an empty graph + combined_railways = nx.MultiDiGraph() + + for railway_type in railway_types: + try: + # Fetch the railway network for the specified type + network = ox.graph_from_place(place, simplify=False, custom_filter=f'["railway"~"{railway_type}"]') + + # Ensure the fetched network is a MultiDiGraph + if not isinstance(network, nx.MultiDiGraph): + network = nx.MultiDiGraph(network) + + except Exception as e: + print(f"No railway data found for '{railway_type}'.") + network = nx.MultiDiGraph() + + # Compose the networks + combined_railways = nx.compose(combined_railways, network) + + # Convert to GeoDataFrame + railways = ox.graph_to_gdfs(combined_railways, nodes=False, edges=True) + + # Drop any other railway types that aren't needed + railways = railways.loc[(~railways["railway"].isin(["tunnel", "abandoned", "razed", "disused", "funicular", "monorail", "miniature"]))] + + # Drop rows where any of the specified columns have values "True" or "yes" + columns_to_check = ['tunnel', 'abandoned', 'razed', 'disused', 'funicular', 'monorail', 'miniature'] + railways = railways.loc[~railways[railways.columns.intersection(columns_to_check)].isin(['True', 'yes']).any(axis=1)] + + # Set railways CRS + railways = railways.to_crs('EPSG:27700') + + return railways + + + + + ## get urban footprints from GUF + + def get_guf(place): + """ + Retrieves a clipped GeoDataFrame of GUF urban areas within a specified place boundary. + + Parameters: + - place (str): The name or address of the place to retrieve urban areas for. 
## get urban footprints from GUF

def get_guf(place):
    """
    Retrieve Global Urban Footprint (GUF) urban areas clipped to a place.

    Parameters:
    - place (str): The name or address of the place to retrieve urban areas for.

    Returns:
    - gdf_clipped (GeoDataFrame): Urban-area polygons clipped to the place
      boundary.
    """
    # Step 1: open the DLR WMS service hosting the GUF mosaic.
    wms_url = 'https://geoservice.dlr.de/eoc/land/wms?GUF04_DLR_v1_Mosaic'
    wms = WebMapService(wms_url, version='1.1.1')

    # Step 2: report the layer with ID 102 (the Global Urban Footprint layer).
    for layer_name, layer in wms.contents.items():
        if '102' in layer_name:
            print(f"Layer ID 102 found: {layer_name}")

    # The loop above is informational only — the layer name is hard-coded.
    layer = 'GUF04_DLR_v1_Mosaic'

    # Step 3: geocode the place boundary.
    boundary_gdf = ox.geocode_to_gdf(place)
    boundary = boundary_gdf.to_crs('EPSG:27700')
    # Buffer so clips keep river lines that may act as borders between geographies.
    boundary_buffered = boundary.buffer(100)
    boundary_buffered = boundary_buffered.to_crs('EPSG:4326')
    boundary_polygon = boundary_gdf.geometry[0]
    wms_boundary = boundary_buffered.geometry[0]

    # Bounding box for the WMS request.
    minx, miny, maxx, maxy = wms_boundary.bounds

    # Step 4: request the raster for the bounding box.
    width, height = 1024, 1024
    response = wms.getmap(
        layers=[layer],
        srs='EPSG:4326',
        bbox=(minx, miny, maxx, maxy),
        size=(width, height),
        format='image/geotiff'
    )

    # Step 5: load the raster and clip it to the buffered boundary.
    with MemoryFile(response.read()) as memfile:
        with memfile.open() as src:
            image = src.read(1)  # first band
            transform = src.transform
            crs = src.crs

            out_image, out_transform = rio_mask(src, [mapping(wms_boundary)], crop=True)
            out_meta = src.meta.copy()
            out_meta.update({"driver": "GTiff",
                             "height": out_image.shape[1],
                             "width": out_image.shape[2],
                             "transform": out_transform,
                             "crs": crs})

    # Step 6: vectorise the urban mask (non-zero pixels assumed urban).
    mask_arr = (out_image[0] != 0).astype(np.uint8)
    shapes_gen = shapes(mask_arr, mask=mask_arr, transform=out_transform)
    polygons = [shape(geom) for geom, value in shapes_gen]
    gdf = gpd.GeoDataFrame({'geometry': polygons}, crs=crs)

    # Step 7: buffer the urban areas.
    buffer_distance = 100  # metres (adjust as needed)
    gdf_buffered = gdf.copy()
    gdf_buffered['geometry'] = gdf['geometry'].buffer(buffer_distance)

    # Step 8: clip to the place boundary.
    # NOTE(review): the clip uses the UNbuffered `gdf`, so `gdf_buffered`
    # above is never used — confirm whether the buffered version was
    # intended here. Behaviour preserved as-is.
    gdf_clipped = gpd.clip(gdf, boundary_gdf)

    return gdf_clipped
## get residential areas

def get_residential_areas(polygon):
    """Fetch OSM landuse=residential polygons inside the given boundary."""
    polygon = polygon.to_crs('EPSG:4326')
    # Query OSM for residential land use within the boundary geometry.
    features = ox.features_from_polygon(polygon.iloc[0], tags={'landuse': 'residential'})

    residential = gpd.GeoDataFrame.from_features(features)
    residential = residential.set_crs('EPSG:4326')
    return residential


## join urban footprints and residential areas
# this creates a single polygon layer within which neighbourhoods can exist

def join_geodataframes(gdf1, gdf2):
    """Concatenate two GeoDataFrames after aligning both to WGS 84."""
    target_crs = 'EPSG:4326'  # WGS 84
    aligned_a = gdf1.to_crs(target_crs)
    aligned_b = gdf2.to_crs(target_crs)

    combined = pd.concat([aligned_a, aligned_b], ignore_index=True)
    return gpd.GeoDataFrame(combined, crs=target_crs)
def buffer_geometries_in_meters(gdf, distance):
    """Buffer geometries by `distance` metres via a projected-CRS round trip."""
    projected_crs = 'EPSG:3395'  # World Mercator, so the buffer unit is metres
    projected = gdf.to_crs(projected_crs)
    projected['geometry'] = projected['geometry'].buffer(distance)
    # Return in the caller's original CRS.
    return projected.to_crs(gdf.crs)


## union into one gdf

def unary_union_polygons(gdf):
    """Dissolve all geometries into a single-row GeoDataFrame."""
    merged_geometry = unary_union(gdf['geometry'])
    return gpd.GeoDataFrame({'geometry': [merged_geometry]}, crs=gdf.crs)


# Function to remove holes from neighbourhoods
def remove_holes(polygon):
    """Drop interior rings from a Polygon; other geometry types pass through."""
    if polygon.geom_type == 'Polygon':
        return Polygon(polygon.exterior)
    return polygon
def get_rivers(boundary_buffered):
    """
    Retrieve river features within a given boundary.

    Args:
        boundary_buffered (GeoDataFrame): Buffered boundary polygon(s).

    Returns:
        GeoDataFrame: River features in EPSG:27700; a GeoDataFrame holding a
        single empty LineString when OSM has no matching data.

    Raises:
        ValueError: If the supplied boundary is empty.
    """
    # Ensure the boundary is in the CRS osmnx expects for the query.
    boundary_buffered = boundary_buffered.to_crs('EPSG:4326')

    # Guard against an empty/incorrectly transformed boundary.
    if boundary_buffered.empty:
        raise ValueError("The provided boundary is empty.")

    # Waterway types that act as physical neighbourhood boundaries.
    tags = {"waterway": ["river", "rapids"]}

    try:
        polygon = boundary_buffered.geometry.iloc[0]
        rivers = ox.features_from_polygon(polygon=polygon, tags=tags)
    except Exception as exc:
        # FIX: the original caught `InsufficientResponseError`, but that name
        # is never in scope (its import is commented out at the top of the
        # file), so any fetch failure would surface as a NameError instead.
        # Detect the "no data" case from the message — the same pattern
        # retrieve_osm_features uses — and re-raise everything else.
        if "There are no data elements in the server response" not in str(exc):
            raise
        print("No data elements found for the given boundary and tags.")
        empty_geometry = {'geometry': [LineString()]}
        return gpd.GeoDataFrame(empty_geometry, crs='EPSG:27700')

    # Drop culverted sections: they run underground and do not sever streets.
    if 'tunnel' in rivers.columns:
        rivers = rivers[rivers['tunnel'] != 'culvert']

    # Convert to British National Grid and fix the geometry column.
    rivers = rivers.to_crs('EPSG:27700')
    rivers = rivers.set_geometry('geometry')

    return rivers
def get_landuse(boundary_buffered):
    """
    Retrieve land-use features unsuitable for neighbourhoods.

    Combines landuse, natural, leisure and aeroway features into one frame.

    Args:
        boundary_buffered (GeoDataFrame/GeoSeries): Buffered boundary.

    Returns:
        GeoDataFrame: Combined unsuitable-area features in EPSG:27700.
    """
    # osmnx needs WGS 84 for the query polygon.
    boundary_buffered = boundary_buffered.to_crs('EPSG:4326')

    # NOTE(review): this passes boundary_buffered.iloc[0] while get_rivers
    # uses boundary_buffered.geometry.iloc[0] — fine when the argument is a
    # GeoSeries; confirm the caller never passes a GeoDataFrame here.

    # Unsuitable land-use types.
    tags = {"landuse": ["industrial", "railway", "brownfield", "commercial", "farmland", "meadow"]}
    landuse = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)
    landuse = landuse.to_crs('27700')

    # Unsuitable "natural" types.
    tags = {"natural": ["wood", "water", "scrub", "coastline", "beach"]}
    nature = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)
    nature = nature.to_crs('27700')

    # Unsuitable "leisure" types — mainly to exclude golf courses.
    tags = {"leisure": ["golf_course", "track", "park"]}
    leisure = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)
    leisure = leisure.to_crs('27700')

    # Aerodromes, fetched tolerantly because many places have none.
    aeroway_tags = {"aeroway": ["aerodrome"]}
    aeroway = retrieve_osm_features(polygon=boundary_buffered.iloc[0], tags=aeroway_tags)
    if aeroway is not None:
        if not aeroway.empty:
            aeroway = aeroway.to_crs('27700')

    # Combine everything (pd.concat silently skips a None aeroway).
    landuse = pd.concat([landuse, nature, leisure, aeroway])

    # Reset the local boundary back to BNG, mirroring the original flow.
    boundary_buffered = boundary_buffered.to_crs('EPSG:27700')

    return landuse
def get_bus_routes(boundary_buffered):
    """
    Retrieve bus routes from OSM/NAPTAN within a given boundary.

    Args:
        boundary_buffered (GeoDataFrame): The boundary of interest.

    Returns:
        bus_routes (GeoDataFrame): Bus-route ways in EPSG:27700.

    Raises:
        Exception: If the Overpass API request fails.
    """
    # Work in WGS 84 to build the Overpass bounding box.
    boundary_buffered = boundary_buffered.to_crs('EPSG:4326')

    # Bounding box for the XML query.
    bounding_box = boundary_buffered.bounds
    minx = bounding_box['minx'].min()
    miny = bounding_box['miny'].min()
    maxx = bounding_box['maxx'].max()
    maxy = bounding_box['maxy'].max()
    bbox = [minx, miny, maxx, maxy]

    # Return the local boundary to BNG.
    boundary_buffered = boundary_buffered.to_crs('27700')

    # Overpass API endpoint.
    overpass_url = "https://overpass-api.de/api/interpreter"

    # NOTE(review): the XML query body below appears to have been stripped
    # from this copy of the file (the element markup is missing) — restore
    # the original Overpass XML (bus-route query over `bbox`) before use.
    xml_query = f"""
    """

    geometries = []
    element_data = []

    # Issue the Overpass request.
    response = requests.post(overpass_url, data=xml_query)

    if response.status_code != 200:
        raise Exception(f"Error fetching data: {response.status_code} - {response.text}")

    data = response.json()

    # Keep only ways that carry inline geometry; build LineStrings from them.
    for element in data.get("elements", []):
        if element.get('type') == 'way' and 'geometry' in element:
            coordinates = [(node['lon'], node['lat']) for node in element['geometry']]
            geometries.append(LineString(coordinates))
            element_data.append(element)

    bus_routes = gpd.GeoDataFrame(element_data, geometry=geometries)

    # Declare WGS 84, then project to BNG.
    bus_routes = bus_routes.set_crs('4326')
    bus_routes = bus_routes.to_crs('27700')

    return bus_routes
def clip_boundaries(os_open_roads, rivers, railways, landuse, bus_routes, boundary_buffered):
    """
    Clip each input layer to the buffered boundary extent.

    Parameters:
    - os_open_roads (GeoDataFrame): OS Open Roads lines.
    - rivers (GeoDataFrame): River lines.
    - railways (GeoDataFrame): Railway lines.
    - landuse (GeoDataFrame): Land-use polygons.
    - bus_routes (GeoDataFrame): Bus-route lines.
    - boundary_buffered (GeoDataFrame): Buffered boundary (used as the mask;
      returned layers are clipped copies, the boundary itself is unchanged).

    Returns:
    - tuple: clipped (roads, rivers, railways, landuse, bus_routes), in the
      same order as the inputs.
    """
    layers = (os_open_roads, rivers, railways, landuse, bus_routes)
    # Clip every layer against the same mask, preserving input order.
    return tuple(gpd.clip(layer, boundary_buffered) for layer in layers)
def process_bus_routes(bus_routes_clip, buffer_distance):
    """
    Count overlapping bus routes per road and keep only multi-route roads.

    Each route line is buffered into a polygon and the number of
    intersecting route polygons is counted. A feature always intersects
    itself, so every count includes the route's own row.

    Args:
        bus_routes_clip (GeoDataFrame): Clipped bus-route lines.
        buffer_distance (float): Buffer radius in metres used to turn each
            route into a polygon before the overlap test.

    Returns:
        GeoDataFrame: Buffered routes whose overlap count is >= 2 — i.e.
        roads carrying more than one bus route.
        (FIX: the original docstring claimed the opposite — "roads with
        less than or equal to one bus route" — contradicting the code;
        the code's behaviour is kept and is now documented correctly.)
    """
    # Buffer a copy so the caller's geometries are untouched.
    bus_routes_buffered = bus_routes_clip.copy()
    bus_routes_buffered['geometry'] = bus_routes_buffered['geometry'].buffer(buffer_distance)

    def count_overlapping_features(gdf):
        """
        Add a 'Bus_routes_count' column: number of rows whose geometry
        intersects each feature (the feature itself included).
        """
        gdf['Bus_routes_count'] = 0

        # O(n^2) pairwise intersection test; acceptable at per-place scale.
        for idx, row in gdf.iterrows():
            geometry = row['geometry']
            overlaps = gdf[gdf['geometry'].intersects(geometry)]
            gdf.at[idx, 'Bus_routes_count'] = len(overlaps)

        return gdf

    bus_routes_buffered_with_count = count_overlapping_features(bus_routes_buffered)

    # Keep routes overlapped by at least one OTHER route
    # (a count of 2+ including self).
    bus_routes_filtered = bus_routes_buffered_with_count[
        bus_routes_buffered_with_count['Bus_routes_count'] >= 2
    ]

    return bus_routes_filtered
def filter_OS_boundary_roads(os_open_roads_clip):
    """
    Select roads that should act as neighbourhood boundaries.

    A road qualifies when any of the following hold:
    - `primary_route` == 'True'
    - `trunk_road` == 'True'
    - `fictitious` == 'True'
    - `road_classification` is 'A Road' or 'B Road'
    - `road_function` is 'Minor Road' or 'Motorway'

    ('Restricted Local Access Road' was deliberately excluded in the
    original selection.)

    Parameters:
    - os_open_roads_clip (DataFrame): OS Open Roads rows.

    Returns:
    - boundary_roads (DataFrame): Rows meeting any criterion above.
    """
    # FIX: the original OR-chain listed road_function == 'Minor Road' twice;
    # the duplicate is removed and the equality chains collapsed into isin()
    # lookups. The selected rows are identical.
    mask = (
        (os_open_roads_clip['primary_route'] == 'True')
        | (os_open_roads_clip['trunk_road'] == 'True')
        | (os_open_roads_clip['fictitious'] == 'True')
        | os_open_roads_clip['road_classification'].isin(['A Road', 'B Road'])
        | os_open_roads_clip['road_function'].isin(['Minor Road', 'Motorway'])
    )
    boundary_roads = os_open_roads_clip.loc[mask]
    return boundary_roads


## buffering and dissolving functions

def buffer_and_dissolve(input_gdf):
    """
    Buffer a layer by 5 m and dissolve it to a single-row GeoDataFrame.

    Args:
        input_gdf (GeoDataFrame): Input features.

    Returns:
        GeoDataFrame: One row holding the dissolved, buffered geometry,
        in the input's CRS.
    """
    buffered_gdf = input_gdf.copy()  # avoid mutating the caller's frame
    buffered_gdf['geometry'] = buffered_gdf['geometry'].buffer(5)  # 5 metre buffer

    # Merge all buffered geometries into one.
    dissolved_geo = buffered_gdf.unary_union
    dissolved_gdf = gpd.GeoDataFrame(geometry=[dissolved_geo])
    dissolved_gdf.crs = input_gdf.crs

    return dissolved_gdf


def dissolve_gdf(input_gdf):
    """Dissolve all geometries into a single-row GeoDataFrame (no buffer)."""
    dissolved_gdf = gpd.GeoDataFrame(geometry=[input_gdf.unary_union])
    dissolved_gdf.crs = input_gdf.crs
    return dissolved_gdf
def erase_boundary_features(boundary, boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd, boundary_landuse_bd, boundary_bus_routes_bd, guf_residential_gdf):
    """
    Erase all boundary features from the study-area polygon.

    Parameters:
    - boundary: GeoDataFrame with the study-area geometry
    - boundary_rivers_bd: GeoDataFrame of river boundary features
    - boundary_roads_bd: GeoDataFrame of road boundary features
    - boundary_rail_bd: GeoDataFrame of rail boundary features
    - boundary_landuse_bd: GeoDataFrame of land-use boundary features
    - boundary_bus_routes_bd: GeoDataFrame of bus-route boundary features
    - guf_residential_gdf: urban-footprint/residential mask polygons

    Returns:
    - erased_boundary_gdf: GeoDataFrame holding the result of the "Erase"
      operation, exploded into individual candidate polygons
    """
    # Neighbourhoods may only exist inside the urban footprint.
    boundary = gpd.clip(boundary, guf_residential_gdf)

    # Merge every barrier layer into one dissolved geometry.
    barrier_layers = pd.concat(
        [boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd,
         boundary_landuse_bd, boundary_bus_routes_bd],
        ignore_index=True,
    )
    boundary_features = dissolve_gdf(barrier_layers)

    # "Erase": subtract the barriers, then collapse to a single geometry.
    remainder = boundary.difference(boundary_features.unary_union)
    remainder = remainder.unary_union

    erased_boundary_gdf = gpd.GeoDataFrame(geometry=[remainder], crs=boundary.crs)

    # Split the multipolygon into individual candidate neighbourhoods.
    erased_boundary_gdf = erased_boundary_gdf.explode()

    return erased_boundary_gdf
def drop_large_or_small_areas(neighbourhoods, min_area=10000, max_area=5000000):
    """
    Drop neighbourhood polygons that are implausibly small or large.

    Adds an 'area' column (square units of the CRS — square metres when the
    frame is in a projected CRS such as EPSG:27700) and keeps only rows with
    min_area <= area <= max_area.

    Parameters:
    - neighbourhoods (GeoDataFrame): Candidate neighbourhood polygons.
    - min_area (float): Smallest plausible area; defaults to 10,000, the
      original hard-coded value (backward compatible).
    - max_area (float): Largest plausible area; defaults to 5,000,000, the
      original hard-coded value.

    Returns:
    - neighbourhoods (GeoDataFrame): Rows within the allowed area range.
    """
    # Calculate area once; downstream code relies on this column existing.
    neighbourhoods["area"] = neighbourhoods.geometry.area

    # between() is inclusive at both ends, matching the original >= / <=.
    in_range = neighbourhoods["area"].between(min_area, max_area)
    return neighbourhoods.loc[in_range]
def filter_neighbourhoods_by_roads(neighbourhoods, os_open_roads_clip, polygon_column_name):
    """
    Count roads per neighbourhood polygon and drop sparse neighbourhoods.

    Args:
        neighbourhoods (GeoDataFrame): Neighbourhood polygons; must already
            carry an 'area' column (added by drop_large_or_small_areas).
        os_open_roads_clip (GeoDataFrame): Road lines.
        polygon_column_name (str): Column of `neighbourhoods` to group by.

    Returns:
        GeoDataFrame: Neighbourhoods with more than 2 intersecting roads
        AND a road density above 0.2.
    """

    def count_roads_within_polygons(polygons_gdf, roads_gdf, polygon_column_name):
        """
        Attach 'road_count' and 'road_density' columns to the polygons.

        Args:
            polygons_gdf (GeoDataFrame): Polygons to annotate.
            roads_gdf (GeoDataFrame): Roads to count.
            polygon_column_name (str): Grouping column in polygons_gdf.

        Returns:
            GeoDataFrame: polygons_gdf with the two new columns.
        """
        # Spatial join: one row per polygon/road intersection.
        joined = gpd.sjoin(polygons_gdf, roads_gdf, how='left', op='intersects')

        # Roads per polygon.
        road_counts = joined.groupby(polygon_column_name).size().reset_index(name='road_count')

        # Merge the counts back onto the polygons.
        polygons_gdf = polygons_gdf.merge(road_counts, on=polygon_column_name, how='left')

        # Density = road_count per unit area, scaled by 10,000 to give
        # human-readable magnitudes.
        # FIX: the original comment described this as "area divided by
        # road_count" — the inverse of what the code computes. The code is
        # unchanged; the comment now matches it.
        polygons_gdf['road_density'] = (polygons_gdf['road_count'] / polygons_gdf['area']) * 10000

        return polygons_gdf

    neighbourhoods = count_roads_within_polygons(neighbourhoods, os_open_roads_clip, polygon_column_name)

    # Keep neighbourhoods with at least 3 roads and density above 0.2.
    # (FIX: the original comment said "less than 4 roads", which did not
    # match the `> 2` test; behaviour is unchanged, the comment is fixed.)
    neighbourhoods = neighbourhoods[(neighbourhoods['road_count'] > 2)]
    neighbourhoods = neighbourhoods[(neighbourhoods['road_density'] > 0.2)]

    return neighbourhoods


def remove_holes(polygon):
    """
    Remove interior rings from a polygon. Mostly for visual reasons.

    Parameters:
        polygon (Polygon): The polygon to remove holes from.

    Returns:
        Polygon: Exterior-only polygon; non-Polygon geometries pass through
        unchanged.
    """
    if polygon.geom_type == 'Polygon':
        return Polygon(polygon.exterior)
    return polygon
drop_large_or_small_areas(neighbourhoods) + + neighbourhoods = filter_neighbourhoods_by_roads(neighbourhoods, os_open_roads_clip, 'geometry') + + ## create unique IDs + # simple number based ID + neighbourhoods['ID'] = range(1, len(neighbourhoods) + 1) + + neighbourhoods['geometry'] = neighbourhoods['geometry'].apply(remove_holes) + + + ## filter neighbourhoods to only locations with more than 1 intersection (1 or fewer intersections indicates that all travel modes will be the same) + # reset neighbourhoods crs + neighbourhoods = neighbourhoods.to_crs('4326') + + # Spatial join to count points within each neighborhood + spatial_join = gpd.sjoin(neighbourhoods, common_nodes_gdf, how='left', op='contains') + + # Group by 'ID' and count the points within each neighborhood + point_counts = spatial_join.groupby('ID').size().reset_index(name='point_count') + + # Filter out neighborhoods with 1 or 0 points + filtered_neighbourhood_ids = point_counts[point_counts['point_count'] > 1]['ID'] + + neighbourhoods= neighbourhoods[neighbourhoods['ID'].isin(filtered_neighbourhood_ids)] + + + + ## we also need to join the length of the streets within the neighbourhood for further analysis + # Reset index of neighbourhoods + neighbourhoods = neighbourhoods.reset_index(drop=True) + + # reset neighbourhoods crs + neighbourhoods = neighbourhoods.to_crs('27700') + + # Perform a spatial join + joined_data = gpd.sjoin(os_open_roads_clip, neighbourhoods, how="inner", op="intersects") + + # Group by neighborhood and calculate total road length + road_lengths = joined_data.groupby('index_right')['length'].sum().reset_index() + + # Merge road_lengths with neighbourhoods and drop 'index_right' column + neighbourhoods = neighbourhoods.merge(road_lengths, left_index=True, right_on='index_right', how='left').drop(columns=['index_right']) + + # Rename the column + neighbourhoods.rename(columns={'length': 'road_lengths'}, inplace=True) + + if return_all == False: + return neighbourhoods + else: + 
# %%
def neighbourhood_permeability(place, neighbourhoods, common_nodes_gdf, all_edges, walk_streets, drive_streets):
    """
    Compare walking versus driving permeability for each neighbourhood.

    For every neighbourhood, computes all-to-all shortest-path distance
    statistics over the walk and drive networks between the nodes shared by
    both networks, plus the total street length within the neighbourhood.

    Returns:
        GeoDataFrame: neighbourhoods joined with per-mode distance
        statistics and walk-minus-drive differences.
    """

    ## all-to-all shortest path calculation
    def calculate_distance_stats_from_points(points_gdf, network):
        """All-pairs shortest-path statistics between the given nodes on `network`."""
        all_pairs_shortest_paths = {}
        points_osmids = points_gdf.index.tolist()  # index assumed to be the osmid

        for start_node in points_osmids:
            shortest_paths = {}
            try:
                for end_node in points_osmids:
                    if start_node != end_node:
                        shortest_paths[end_node] = nx.shortest_path_length(
                            network, start_node, end_node, weight='length')
                all_pairs_shortest_paths[start_node] = shortest_paths
            except nx.NetworkXNoPath:
                # NOTE(review): one unreachable end node discards ALL pair
                # results for this start node, because the whole inner loop
                # sits inside the try — confirm this is intended rather than
                # skipping only the unreachable pair.
                continue

        distances = [length for paths in all_pairs_shortest_paths.values() for length in paths.values()]

        # NOTE(review): statistics.mean raises on an empty list (e.g. fewer
        # than two mutually reachable nodes) — unchanged from the original.
        min_distance = min(distances)
        max_distance = max(distances)
        return {
            "mean_distance": statistics.mean(distances),
            "median_distance": statistics.median(distances),
            "min_distance": min_distance,
            "max_distance": max_distance,
            "distance_range": max_distance - min_distance,
            "total_distance": sum(distances),
        }

    ## processing for all-to-all
    results = []

    for index, row in neighbourhoods.iterrows():
        neighbourhood = neighbourhoods.loc[[index]]

        # Buffer the neighbourhood slightly (15 m) so boundary nodes are captured.
        neighbourhood = neighbourhood.to_crs('27700')
        buffered_geom = neighbourhood['geometry'].buffer(15)
        neighbourhood_buffer = gpd.GeoDataFrame(geometry=buffered_geom)
        neighbourhood, neighbourhood_buffer = neighbourhood.to_crs('4326'), neighbourhood_buffer.to_crs('4326')

        # Nodes reachable by both walking and driving within the area.
        neighbourhood_nodes = gpd.clip(common_nodes_gdf, neighbourhood_buffer)

        # Total street length inside the neighbourhood.
        edges_within_neighbourhood = gpd.sjoin(all_edges, neighbourhood, how="inner", op="intersects")
        total_length = edges_within_neighbourhood['length'].sum()

        # Distance statistics per travel mode.
        walk_stats = calculate_distance_stats_from_points(neighbourhood_nodes, walk_streets)
        drive_stats = calculate_distance_stats_from_points(neighbourhood_nodes, drive_streets)

        # Attach the statistics to this neighbourhood's (local) row.
        for stat_name in ("mean_distance", "median_distance", "min_distance",
                          "max_distance", "distance_range", "total_distance"):
            neighbourhood[f'walk_{stat_name}'] = walk_stats[stat_name]
            neighbourhood[f'drive_{stat_name}'] = drive_stats[stat_name]

        # Record the per-neighbourhood summary used for the final merge.
        results.append({
            'neighbourhood_id': neighbourhood['ID'].iloc[0],
            'walk_mean_distance': walk_stats['mean_distance'],
            'walk_median_distance': walk_stats['median_distance'],
            'walk_total_distance': walk_stats['total_distance'],
            'drive_mean_distance': drive_stats['mean_distance'],
            'drive_median_distance': drive_stats['median_distance'],
            'drive_total_distance': drive_stats['total_distance'],
            'total_edge_length': total_length,
        })

    ## convert the results to a dataframe
    results_df = pd.DataFrame(results)

    ## walking minus driving: negative values mean walking is more direct
    results_df['mean_distance_diff'] = results_df['walk_mean_distance'] - results_df['drive_mean_distance']
    results_df['median_distance_diff'] = results_df['walk_median_distance'] - results_df['drive_median_distance']
    results_df['total_distance_diff'] = results_df['walk_total_distance'] - results_df['drive_total_distance']

    merged_df = pd.merge(neighbourhoods, results_df, left_on="ID", right_on="neighbourhood_id")
    access_results_gdf = gpd.GeoDataFrame(merged_df, geometry='geometry')

    return access_results_gdf
def get_barriers(boundary):
    """
    Find physical modal filters (bollards, planters, etc.) within a boundary.

    Args:
        boundary (geopandas.GeoDataFrame): Study-area boundary.

    Returns:
        barriers (geopandas.GeoDataFrame): Point modal filters snapped to streets.
        streets_gdf (geopandas.GeoDataFrame): OSM street edges joined with
        barrier attributes.

    NOTE(review): relies on `all_streets` from the enclosing
    get_modal_filters scope.
    """
    # osmnx expects WGS 84.
    boundary_4326 = boundary.to_crs('EPSG:4326')

    # The most "basic" filters mapped: physical barriers/bollards etc.
    tags = {"barrier": ["bollard", "bus_trap", "entrance", "planter", "sump_buster", "wedge"]}
    barriers = ox.features_from_polygon(polygon=boundary_4326.geometry.iloc[0], tags=tags)

    # --- convert any LineString barriers into their constituent points ---
    barriers_linestrings = barriers[barriers['geometry'].geom_type == 'LineString']

    # Collector for the exploded points (inherits the linestring columns).
    points_gdf = gpd.GeoDataFrame(columns=list(barriers_linestrings.columns), crs=barriers_linestrings.crs)

    for idx, row in barriers_linestrings.iterrows():
        if isinstance(row['geometry'], LineString):
            # One point per linestring vertex.
            points = [Point(coord) for coord in list(row['geometry'].coords)]
            points_df = gpd.GeoDataFrame(geometry=points, crs=barriers_linestrings.crs)
            # Copy every non-geometry attribute onto each point.
            for col in barriers_linestrings.columns:
                if col != 'geometry':
                    points_df[col] = row[col]
            # Stash the point geometry under a temporary name.
            points_df = points_df.rename(columns={'geometry': 'merged_geometry'})
            points_gdf = pd.concat([points_gdf, points_df], ignore_index=True)

    # Drop the inherited (empty) geometry column, remove the original
    # linestrings, and fold the exploded points back in.
    points_gdf = points_gdf.drop(columns=['geometry'])
    barriers = barriers[barriers['geometry'].geom_type != 'LineString']
    points_gdf = points_gdf.rename(columns={'merged_geometry': 'geometry'})
    barriers = pd.concat([barriers, points_gdf], ignore_index=True)
    barriers.reset_index(drop=True, inplace=True)

    # Flag points that originated from linestrings.
    barriers['previously_linestring'] = False
    for idx, row in barriers_linestrings.iterrows():
        if isinstance(row['geometry'], LineString):
            for point in [Point(coord) for coord in list(row['geometry'].coords)]:
                mask = barriers['geometry'].intersects(point)
                if mask.any():
                    barriers.loc[mask, 'previously_linestring'] = True

    # Unique ID per barrier.
    barriers['barrier_id'] = range(1, len(barriers) + 1)

    # Join barriers onto the OSM street network.
    streets_gdf = ox.graph_to_gdfs(all_streets, nodes=False, edges=True)
    streets_gdf = gpd.sjoin(streets_gdf, barriers, how="left", op="intersects")

    # Harmonise the suffixed column names produced by the join.
    streets_gdf.columns = streets_gdf.columns.str.replace("_right", "_barrier").str.replace("_left", "_street")
    # The "barrier" column may or may not have been suffixed.
    streets_gdf['barrier_barrier'] = streets_gdf['barrier'] if 'barrier' in streets_gdf.columns else streets_gdf['barrier_barrier']

    if 'name_street' in streets_gdf.columns:
        streets_gdf = streets_gdf.rename(columns={'name_street': 'name'})
    barrier_streets = streets_gdf.dropna(subset=['barrier_barrier'])

    # Tag the streets that carry a physical filter.
    barrier_streets['filter_type'] = 'barrier or bollard'

    ## keep only barrier points on / within 1 m of a street
    streets_gdf['has_barrier'] = 'yes'

    # Project both layers before the nearest join.
    barriers, streets_gdf = barriers.to_crs(3857), streets_gdf.to_crs(3857)

    barriers = gpd.sjoin_nearest(barriers, streets_gdf, how="left", max_distance=1)
    barriers = barriers.dropna(subset=['has_barrier'])
    barriers = barriers.reset_index(drop=True)

    # Collapse duplicate matches per street barrier id.
    barriers = barriers.dissolve(by='barrier_id_right')

    barriers['filter_type'] = 'barrier or bollard'
    barriers.reset_index(drop=True, inplace=True)

    return barriers, streets_gdf
+ + """ + + # we need to double check the name of "access" + streets_gdf['access_street'] = streets_gdf['access'] if 'access' in streets_gdf.columns else streets_gdf['access_street'] + streets_gdf['bicycle_street'] = streets_gdf['bicycle'] if 'bicycle' in streets_gdf.columns else streets_gdf['bicycle_street'] + streets_gdf['bus'] = streets_gdf['bus_street'] if 'bus_street' in streets_gdf.columns else streets_gdf['bus'] + + busgates = streets_gdf[((streets_gdf["bus"] == "yes") & (streets_gdf["access_street"] == "no") & (streets_gdf["bicycle_street"] == "yes")) | + (streets_gdf["bus"] == "yes") & (streets_gdf["motor_vehicle_street"] == "no") & (streets_gdf["bicycle_street"] == "yes") + ] + + # add bus gate tag + busgates['filter_type'] = 'bus gate' + + return busgates, streets_gdf + + def get_contraflows(streets_gdf): + """ + Finds the unrestricted one-way streets for cycling but restricted for cars. + + Parameters: + streets_gdf (GeoDataFrame): A GeoDataFrame containing street data. + + Returns: + GeoDataFrame: A GeoDataFrame containing the unrestricted one-way streets for cycling. 
+ """ + + # Find one-way streets where cycling is unrestricted but cars are restricted + oneways = streets_gdf[(streets_gdf["oneway"] == True) & (streets_gdf["oneway:bicycle"] == "no")] + + # Dissolve the roads with the same name to avoid miscounting the total number of oneways + oneways['name'] = oneways['name'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x)) + oneways = oneways.dissolve(by='name') + + # Reset the index + oneways = oneways.reset_index() + + # Add one-way bike tag + oneways['filter_type'] = 'one-way bike' + + return oneways + + + + def filter_streets_continuations(input_gdf): + ## clean dataframe + # Check if 'highway_street' column exists and rename it to 'highway' + if 'highway_street' in input_gdf.columns: + input_gdf.rename(columns={'highway_street': 'highway'}, inplace=True) + + + + + # filter dataframe + ## remove indoor roads, these are likey pedestrian only however often don't have any "cycling" related tag + if 'covered' in input_gdf.columns: + input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'covered' in str(x))] + input_gdf = input_gdf[input_gdf['covered'] != 'yes'] + ## also remove footways and steps, as these are almost pedestrain only, never cyclable + input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'footway' in str(x))] + input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'steps' in str(x))] + + + + ## clean dataframe + input_gdf['name'] = input_gdf['name'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x)) + input_gdf['highway'] = input_gdf['highway'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x)) + + + + + ## perform street continunation filtering + # Grouping by 'name' and checking for groups with 'pedestrian' and another highway type + grouped = input_gdf.groupby('name').filter(lambda x: any('pedestrian' in val for val in x['highway']) and len(x['highway'].unique()) > 1) + street_continuations_gdf = 
grouped[grouped['highway'].str.contains('pedestrian', case=False, na=False)] # Extracting the rows containing 'pedestrian' in the highway column + + ## deal with nan names + + + ## dissolve lines that are very very close to each other + if not street_continuations_gdf.empty: + street_continuations_gdf = street_continuations_gdf.to_crs('27700') + street_continuations_gdf['buffer'] = street_continuations_gdf.geometry.buffer(1) + dissolved = street_continuations_gdf.dissolve(by='name') + + # If a MultiPolygon is formed, convert it to individual polygons + if isinstance(dissolved.geometry.iloc[0], MultiPolygon): + dissolved = dissolved.explode() + + # Remove the buffer column + dissolved = dissolved.drop(columns='buffer') + street_continuations_gdf = dissolved.to_crs('4326') + + return street_continuations_gdf + + + + barriers, streets_gdf = get_barriers(boundary) + busgates, streets_gdf = get_bus_gates(streets_gdf) + oneways = get_contraflows(streets_gdf) + streets_continuations_gdf = filter_streets_continuations(streets_gdf) + + # add street conitinuation tag + streets_continuations_gdf['filter_type'] = 'street continuation' + + + ## ensure correct crs + barriers, busgates, oneways, streets_continuations_gdf = barriers.to_crs('4326'), busgates.to_crs('4326'), oneways.to_crs('4326'), streets_continuations_gdf.to_crs('4326') + + filters = gpd.GeoDataFrame(pd.concat([barriers, busgates, oneways, streets_continuations_gdf], ignore_index=True)) + + + + ## alter neighbourhoods before joining + # Reset neighbourhood CRS + filters_results_gdf = neighbourhoods.to_crs('EPSG:27700') + + # Buffer to ensure all filters are captured + filters_results_gdf['geometry'] = filters_results_gdf['geometry'].buffer(5) + + # Reset neighbourhood CRS + filters_results_gdf = filters_results_gdf.to_crs('EPSG:4326') + + ## Spatial join + # Perform a spatial join between neighbourhoods and filters + joined_data = gpd.sjoin(filters_results_gdf, filters, how="left", predicate="intersects", 
# %%
def get_rat_runs(place, neighbourhoods):
    """
    Identify rat runs (residential through-routes) and count them per neighbourhood.

    Builds the drivable OSM network for ``place``, separates "boundary" roads
    (major classes / high speed limits) from the residential remainder, samples
    shortest drive-time routes between nodes that sit on both sub-networks, and
    flags residential edges used by those routes as rat runs. The per-polygon
    rat-run edge count is joined onto ``neighbourhoods``.

    Parameters:
        place (str): Place name understood by OSMnx.
        neighbourhoods (GeoDataFrame): Neighbourhood polygons with an 'ID' column.

    Returns:
        tuple:
            neighbourhoods (GeoDataFrame): input polygons (EPSG:27700) with a
                'rat_run_edge_count' column joined on.
            rat_run_edges (GeoDataFrame): edges flagged as rat runs (EPSG:27700).
            drive_gdf_edges (GeoDataFrame): all drivable edges (EPSG:27700).

    Notes:
        - Reads the module-level ``iterations`` setting (caps sampled O/D pairs).
        - Results are stochastic: origins/destinations are randomly sampled.
    """
    drive_g = ox.graph_from_place(place, network_type='drive', simplify=True)

    ## Clean the graph and compute travel times along edges

    def clean_maxspeed(maxspeed):
        """Strip ' mph' suffixes from maxspeed values; default missing to 30."""
        if maxspeed is None:
            return 30
        elif isinstance(maxspeed, str) and ' mph' in maxspeed:
            return float(maxspeed.replace(' mph', ''))
        elif isinstance(maxspeed, list):
            # parallel-edge tag lists: clean each element
            return [float(speed.replace(' mph', '')) for speed in maxspeed]
        else:
            return maxspeed

    for u, v, key, data in drive_g.edges(keys=True, data=True):
        # clean_maxspeed(None) -> 30, so .get covers the missing-tag case too
        data['maxspeed'] = clean_maxspeed(data.get('maxspeed'))

    def convert_single_maxspeed(maxspeed):
        """Coerce one maxspeed value to a number; default to 30 when unparseable."""
        if maxspeed is None:
            return 30
        if isinstance(maxspeed, str):
            # keep only the numeric part of strings like '30 mph'
            numeric_part = ''.join(c for c in maxspeed if c.isdigit() or c == '.')
            return float(numeric_part) if numeric_part else 30
        elif isinstance(maxspeed, (int, float)):
            return maxspeed
        else:
            return 30

    def convert_maxspeed(maxspeed):
        """Coerce a maxspeed value (or the first element of a list) to a number."""
        if isinstance(maxspeed, list) and maxspeed:
            return convert_single_maxspeed(maxspeed[0])
        return convert_single_maxspeed(maxspeed)

    def calculate_travel_time(length, maxspeed):
        """Travel time in seconds for an edge of `length` metres at `maxspeed` mph."""
        speed_mps = convert_maxspeed(maxspeed) * 0.44704  # 1 mph = 0.44704 m/s
        if length is not None and speed_mps > 0:
            return length / speed_mps
        return None

    for u, v, key, data in drive_g.edges(keys=True, data=True):
        if 'length' in data:
            data['travel_time'] = calculate_travel_time(data.get('length'), data.get('maxspeed'))

    def get_sparse_graph(drive_g):
        """
        Return a copy of the graph keeping only boundary roads.

        An edge is kept when its highway class is a major road, or its maxspeed
        matches one of the listed values.

        NOTE(review): maxspeed values were converted to floats/lists above, so
        these string/tuple comparisons rarely match — boundary selection mostly
        relies on the highway class. Confirm whether that is intended.
        """
        sparse_drive_g = drive_g.copy()

        # data.get('highway') can itself be a list (unhashable), so membership
        # is tested against lists, not sets.
        conditions = [
            (
                data.get('highway') in ['trunk', 'trunk_link', 'motorway', 'motorway_link',
                                        'primary', 'primary_link', 'secondary', 'secondary_link',
                                        'tertiary', 'tertiary_link']
            ) or (
                data.get('maxspeed') in ['60', '70', '40', ('20', '50'), ('30', '60'), ('30', '50'),
                                         ('70', '50'), ('40', '60'), ('70', '60'), ('60', '40'),
                                         ('50', '40'), ('30', '40'), ('20', '60'), ('70 ', '40 '),
                                         ('30 ', '70')]
            )
            for u, v, k, data in sparse_drive_g.edges(keys=True, data=True)
        ]

        edges_to_remove = [
            (u, v, k)
            for (u, v, k), condition in zip(sparse_drive_g.edges(keys=True), conditions)
            if not condition
        ]
        sparse_drive_g.remove_edges_from(edges_to_remove)

        # drop nodes left without any boundary edge
        sparse_drive_g.remove_nodes_from(list(nx.isolates(sparse_drive_g)))
        return sparse_drive_g

    sparse_drive_g = get_sparse_graph(drive_g)

    ## Partition the network: residential remainder = full graph minus boundary edges
    drive_g_copy = drive_g.copy()
    drive_g_copy.remove_edges_from(sparse_drive_g.edges)
    # (The original also "removed" sparse nodes after stringifying their IDs,
    # which was a no-op because OSMnx node IDs are ints. The nodes must remain
    # anyway: the junction-splitting below depends on nodes shared between the
    # two graphs, so no node removal happens here.)
    drive_g_copy.remove_nodes_from(list(nx.isolates(drive_g_copy)))

    # nodes present in both graphs: junctions between neighbourhoods and boundary roads
    shared_nodes = set(sparse_drive_g.nodes).intersection(drive_g_copy.nodes)

    # Split those junctions: re-attach each residential edge at a shared node to
    # a fresh duplicate node, so neighbourhood components are not connected to
    # each other through boundary-road junctions.
    # NOTE(review): only out-edges of the shared node are re-attached; in-edges
    # from other nodes are dropped with the node — confirm this is intended.
    node_counter = Counter()
    for shared_node in shared_nodes:
        drive_g_edges = list(drive_g_copy.edges(shared_node, data=True, keys=True))

        for u, v, key, data in drive_g_edges:
            # Sparse edges were already removed from drive_g_copy, so every
            # remaining edge here is residential. (The original compared a
            # 3-tuple against 4-tuples, which was vacuously true; this check
            # states the same intent explicitly.)
            if not sparse_drive_g.has_edge(u, v, key):
                # duplicate whichever endpoint is the shared node
                new_u = f"new_{u}" if u == shared_node else u
                new_v = f"new_{v}" if v == shared_node else v

                # unique ID per duplicate so parallel edges get distinct nodes
                new_u_id = f"{new_u}_{key}_{node_counter[new_u]}" if new_u != u else new_u
                new_v_id = f"{new_v}_{key}_{node_counter[new_v]}" if new_v != v else new_v
                node_counter[new_u] += 1
                node_counter[new_v] += 1

                # copy node attributes and re-attach the edge (both directions)
                drive_g_copy.add_node(new_u_id, **drive_g_copy.nodes[u])
                drive_g_copy.add_node(new_v_id, **drive_g_copy.nodes[v])
                drive_g_copy.add_edge(new_u_id, new_v_id, key=key, **data)
                if not drive_g_copy.has_edge(new_v_id, new_u_id, key):
                    drive_g_copy.add_edge(new_v_id, new_u_id, key=key, **data)

                # detach the shared node from this edge
                drive_g_copy.remove_edge(u, v, key)

        # remove the now-detached junction node
        drive_g_copy.remove_node(shared_node)

    # Strongly connected components of the split residential graph = candidate
    # neighbourhood cells. (The original also built a random colour per SCC for
    # a commented-out plot; that dead work is dropped.)
    drive_g_scc = list(nx.strongly_connected_components(drive_g_copy))
    node_scc_mapping = {node: i for i, scc in enumerate(drive_g_scc) for node in scc}

    # tag edges in the split graph with their SCC index
    for u, v, key, data in drive_g_copy.edges(keys=True, data=True):
        scc_index_u = node_scc_mapping.get(u)
        scc_index_v = node_scc_mapping.get(v)
        drive_g_copy[u][v][key]['scc_index'] = scc_index_u if scc_index_u is not None else scc_index_v

    # carry the SCC index back onto the original driving graph
    for u, v, key, data in drive_g.edges(keys=True, data=True):
        scc_index_u = node_scc_mapping.get(u)
        scc_index_v = node_scc_mapping.get(v)
        drive_g[u][v][key]['scc_index'] = scc_index_u if scc_index_u is not None else scc_index_v

    ## Sample random origin/destination pairs among nodes common to both graphs

    def get_random_nodes_for_each(graph1, graph2):
        """
        For each node common to both graphs, pick random destination nodes.

        The sample size is 10% below the common-node total, capped at the
        module-level ``iterations``.
        """
        random_nodes_for_each = {}
        common_nodes = set(graph1.nodes()) & set(graph2.nodes())
        total_common_nodes = len(common_nodes)
        num_nodes = min(iterations, max(1, int(total_common_nodes * 0.9)))

        for node in common_nodes:
            neighbors = list(set(graph1.neighbors(node)) & set(graph2.neighbors(node)))
            if len(neighbors) >= num_nodes:
                random_neighbors = random.sample(neighbors, num_nodes)
            else:
                pool = list(common_nodes - set(neighbors))
                # never request more extras than exist (the original could
                # raise ValueError here when the pool was too small)
                extra = min(num_nodes - len(neighbors), len(pool))
                random_neighbors = neighbors + random.sample(pool, extra)
            random_nodes_for_each[node] = random_neighbors
        return random_nodes_for_each

    random_nodes_for_each = get_random_nodes_for_each(drive_g, sparse_drive_g)

    # flatten the mapping into (origin, destination) pairs
    nodes_list = [(key, value) for key, values in random_nodes_for_each.items() for value in values]

    ## Shortest drive-time paths between the sampled pairs
    shortest_paths = {}
    for start_node, end_node in nodes_list:
        try:
            shortest_paths[(start_node, end_node)] = nx.shortest_path(
                drive_g, start_node, end_node, weight='travel_time')
        except nx.NetworkXNoPath:
            continue  # unreachable pair: skip

    ## Flag edges traversed by any sampled shortest path as rat runs
    edges_passed_through = set()
    for path in shortest_paths.values():
        for edge in zip(path, path[1:]):
            if edge in drive_g.edges:
                edges_passed_through.add(edge)

    for u, v, data in drive_g.edges(data=True):
        data['rat_run'] = (u, v) in edges_passed_through or (v, u) in edges_passed_through

    # project to British National Grid for the spatial join
    drive_gdf_nodes, drive_gdf_edges = ox.graph_to_gdfs(drive_g)
    drive_gdf_edges = drive_gdf_edges.to_crs(27700)
    drive_gdf_nodes = drive_gdf_nodes.to_crs(27700)

    rat_run_edges = drive_gdf_edges[drive_gdf_edges['rat_run'] == True]

    neighbourhoods = neighbourhoods.to_crs(27700)

    # count rat-run edges intersecting each neighbourhood
    # (predicate= replaces the deprecated op= kwarg; the file already uses it elsewhere)
    join_result = gpd.sjoin(neighbourhoods, rat_run_edges, how='left', predicate='intersects')
    rat_run_edge_count = (
        join_result.groupby(join_result.index)['ID']
        .count()
        .reset_index(name='rat_run_edge_count')
    )

    # join counts back by the original neighbourhood index
    neighbourhoods = neighbourhoods.join(rat_run_edge_count.set_index('index'))

    return neighbourhoods, rat_run_edges, drive_gdf_edges
# %%
def score_neighbourhoods(filters_results_gdf, access_results_gdf, neighbourhoods):
    """
    Combine the three neighbourhood metrics into LTN plausibility scores.

    Merges the modal-filter, permeability and rat-run result tables on 'ID',
    converts each raw metric to a 0-100 score, combines them with the
    module-level weights, and clusters the scored neighbourhoods with k-means.

    Parameters:
        filters_results_gdf (GeoDataFrame): modal-filter metrics per
            neighbourhood (must contain 'filter_road_density').
        access_results_gdf (GeoDataFrame): permeability metrics per
            neighbourhood (must contain 'mean_distance_diff').
        neighbourhoods (GeoDataFrame): polygons carrying 'rat_run_edge_count'.

    Returns:
        GeoDataFrame: per-neighbourhood scores ('rat_run_score',
        'mean_distance_diff_score', 'filter_road_density_score',
        'overall_score', 'cluster_label') with geometry.

    Side effects:
        Displays an elbow-method plot (matplotlib) for choosing k.

    Note: reads the module-level weighting globals (through_route_weighting,
    permiablity_weighting, modal_filter_weighting).
    """
    ## join the metric tables together on neighbourhood ID
    results_gdf = gpd.GeoDataFrame(filters_results_gdf.merge(
        access_results_gdf, on="ID", suffixes=('_filters', "_access")))
    results_gdf = results_gdf.set_geometry('geometry_access')
    final_results_gdf = results_gdf.merge(
        neighbourhoods[['ID', 'rat_run_edge_count']], on='ID', how='left')
    # keep the filters-side geometry as the canonical one
    final_results_gdf['geometry'] = final_results_gdf['geometry_filters']
    final_results_gdf = final_results_gdf.set_geometry('geometry')
    final_results_gdf.drop(columns=['geometry_filters', 'geometry_access'], inplace=True)

    def score_rat_run_edge_count(value):
        """100 for <=1 rat-run edge, then exponential decay (halves per extra edge)."""
        if value <= 1:
            return 100
        return 100 / (2 ** value)

    final_results_gdf["rat_run_score"] = (
        final_results_gdf["rat_run_edge_count"].apply(score_rat_run_edge_count))

    def score_mean_distance_diff(value):
        """0 when walking is no shorter than driving; exponential rise to 100 at -750 m."""
        if value >= 0:
            return 0
        if value <= -750:  # 750 m cut-off
            return 100
        normalized_value = abs(value) / 750  # normalise to 0..1
        return 100 * (1 - math.exp(-5 * normalized_value))

    final_results_gdf["mean_distance_diff_score"] = (
        final_results_gdf["mean_distance_diff"].apply(score_mean_distance_diff))

    def score_road_density_filters(value):
        """Linear 0-100 score, saturating at a filter*density product of 40."""
        if value <= 0:
            return 0
        if value >= 40:
            return 100
        return (value / 40) * 100

    final_results_gdf["filter_road_density_score"] = (
        final_results_gdf["filter_road_density"].apply(score_road_density_filters))

    # .copy() so the score columns below are written to an independent frame,
    # not a view of final_results_gdf (avoids SettingWithCopy behaviour)
    scored_neighbourhoods = final_results_gdf[
        ["geometry", "rat_run_score", "mean_distance_diff_score", "filter_road_density_score"]
    ].copy()

    # Weighted overall score. (The original first computed an unweighted mean
    # and hard-coded weights, then immediately overwrote both; that dead code
    # is removed.) Weights come from the module-level configuration globals.
    weight_rat_run_score = through_route_weighting
    weight_mean_distance_diff_score = permiablity_weighting
    weight_road_density_filters_score = modal_filter_weighting

    scored_neighbourhoods["overall_score"] = (
        (weight_rat_run_score * scored_neighbourhoods["rat_run_score"]) +
        (weight_mean_distance_diff_score * scored_neighbourhoods["mean_distance_diff_score"]) +
        (weight_road_density_filters_score * scored_neighbourhoods["filter_road_density_score"])
    ) / (weight_rat_run_score + weight_mean_distance_diff_score + weight_road_density_filters_score)

    ## elbow plot to sanity-check the choice of k for clustering
    X = scored_neighbourhoods[["rat_run_score", "mean_distance_diff_score", "filter_road_density_score"]]
    wcss = []  # within-cluster sum of squares per k
    k_values = range(1, 11)
    for k in k_values:
        kmeans = KMeans(n_clusters=k, random_state=42)
        kmeans.fit(X)
        wcss.append(kmeans.inertia_)

    plt.plot(k_values, wcss, marker='o')
    plt.title('Elbow Method')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('WCSS')
    plt.xticks(k_values)
    plt.show()

    ## final k-means clustering (k fixed at 2: plausible LTN vs not)
    k = 2
    features = ["rat_run_score", "mean_distance_diff_score", "filter_road_density_score"]
    X = scored_neighbourhoods[features]
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(X)
    scored_neighbourhoods["cluster_label"] = kmeans.labels_

    return scored_neighbourhoods
# %%
def create_webmaps(filters, scored_neighbourhoods, rat_run_edges, boundary_roads, drive_gdf_edges):
    """
    Build an interactive Folium web map of the results and export GeoPackages.

    Layers: scored neighbourhoods (Viridis-coloured by overall score), all
    drivable streets, rat-run edges, boundary ("busy") roads, and modal-filter
    points/lines. The map HTML and three GeoPackages (neighbourhoods, rat runs,
    filters) are written to /home/chrisl/outputs.

    Parameters:
        filters (GeoDataFrame): modal-filter features with 'filter_type'.
        scored_neighbourhoods (GeoDataFrame): scored neighbourhood polygons.
        rat_run_edges (GeoDataFrame): edges flagged as rat runs.
        boundary_roads (GeoDataFrame): boundary/busy roads.
        drive_gdf_edges (GeoDataFrame): all drivable edges.

    Returns:
        folium.Map: the assembled map.

    Side effects:
        Writes files under /home/chrisl/outputs; mutates the input
        GeoDataFrames (filter_type relabelling; non-geometry columns cast to
        str for GeoPackage export).

    Note: reads the module-level ``place`` global for output file naming.
    """
    import branca.colormap as cm

    # human-friendly layer labels (unknown types pass through unchanged)
    replacement_map = {
        'barrier or bollard': 'Barrier or Bollard',
        'bus gate': 'Bus Gate',
        'one-way bike': 'Cycle Contraflow',
        'street continuation': 'Street Continuation'
    }
    filters['filter_type'] = filters['filter_type'].map(replacement_map).fillna(filters['filter_type'])

    # centre the map on the first neighbourhood's centroid
    centroid = scored_neighbourhoods.geometry.centroid.iloc[0]
    m = folium.Map(location=[centroid.y, centroid.x], zoom_start=12)

    # continuous Viridis colour scale over the observed score range
    cmap = cm.linear.viridis.scale(scored_neighbourhoods['overall_score'].min(),
                                   scored_neighbourhoods['overall_score'].max())

    folium.GeoJson(
        scored_neighbourhoods,
        name="Scored Neighbourhoods",
        style_function=lambda x: {'fillColor': cmap(x['properties']['overall_score']),
                                  'color': cmap(x['properties']['overall_score']),
                                  'weight': 1, 'fillOpacity': 0.7},
        tooltip=folium.features.GeoJsonTooltip(
            fields=['rat_run_score', 'mean_distance_diff_score',
                    'filter_road_density_score', 'overall_score', 'cluster_label'],
            aliases=['Rat Run Score', 'Mean Distance Diff Score',
                     'Filter Road Density Score', 'Overall Score', 'Cluster Label'])
    ).add_to(m)

    # context layers
    folium.GeoJson(drive_gdf_edges, name="Streets",
                   style_function=lambda x: {'color': 'lightgreen', 'weight': 1,
                                             'fillOpacity': 0.7}).add_to(m)
    folium.GeoJson(rat_run_edges, name="Rat Runs",
                   style_function=lambda x: {'color': 'red', 'weight': 1.5,
                                             'fillOpacity': 0.7}).add_to(m)
    folium.GeoJson(boundary_roads, name="Busy Roads",
                   style_function=lambda x: {'color': 'orange', 'weight': 3,
                                             'fillOpacity': 0.7}).add_to(m)

    # modal filters: point and line features in separate toggleable groups
    point_group = folium.FeatureGroup(name='Modal Filtering Points', show=True)
    line_group = folium.FeatureGroup(name='Modal Filtering Streets', show=True)

    for _, row in filters.iterrows():
        tooltip_text = f"Filter type: {row['filter_type']}"
        if row.geometry.type == 'Point':
            folium.CircleMarker(location=[row.geometry.y, row.geometry.x], radius=2,
                                color='purple', fill=True, fill_color='purple',
                                tooltip=tooltip_text).add_to(point_group)
        elif row.geometry.type in ('MultiLineString', 'LineString'):
            folium.GeoJson(row.geometry,
                           style_function=lambda x: {'color': 'purple', 'weight': 1.5,
                                                     'fillOpacity': 0.7},
                           tooltip=tooltip_text).add_to(line_group)

    point_group.add_to(m)
    line_group.add_to(m)

    folium.LayerControl(autoZIndex=True).add_to(m)

    # typo fixed in user-facing caption ("Plausiblity" -> "Plausibility")
    cmap.caption = 'LTN Plausibility Scores (Possible range: 0-100)'
    cmap.add_to(m)

    # Explanatory overlay. NOTE(review): the original inline markup was garbled
    # in transit; this reconstructs a simple fixed-position box with the same
    # wording — confirm against the upstream source.
    html_text = """
    <div style="position: fixed; bottom: 10px; left: 10px; width: 320px;
                z-index: 9999; background-color: white; padding: 8px;
                border: 1px solid grey; font-size: 12px;">
    Scored neighbourhoods show a LTN 'Plausibility' score which incorporates
    metrics based on the presence of rat-runs, modal filters and measures of
    neighbourhood permeability. Map results are experimental, and should be
    treated as such. Get in touch via c.larkin@newcastle.ac.uk or
    https://github.com/Froguin99/LTN-Detection.
    </div>
    """
    # (the original also added a bare folium.MacroElement() here — a no-op, removed)
    m.get_root().html.add_child(folium.Element(html_text))

    ## exports
    output_dir = '/home/chrisl/outputs'
    # file naming relies on the module-level `place` global set by the driver loop
    place_name = place.replace(", United Kingdom", "").strip()

    m.save(os.path.join(output_dir, f'{place_name}_example.html'))

    def _export_gpkg(gdf, file_path):
        """Write `gdf` to a GeoPackage, stringifying non-geometry columns first
        (GPKG cannot store list/object values). Mutates `gdf` in place."""
        geometry_column = gdf.geometry.name
        for column in gdf.columns:
            if column != geometry_column:
                gdf[column] = gdf[column].astype(str)
        gdf.to_file(file_path, driver="GPKG")

    _export_gpkg(scored_neighbourhoods,
                 os.path.join(output_dir, f'scored_neighbourhoods_{place_name}.gpkg'))
    _export_gpkg(rat_run_edges,
                 os.path.join(output_dir, f'rat_runs_{place_name}.gpkg'))
    _export_gpkg(filters,
                 os.path.join(output_dir, f'filters_{place_name}.gpkg'))

    print("Finished", place)

    return m

# %%
## run configuration (read as globals by the functions above)
# max sampled origin/destination pairs per node in get_rat_runs; default 1000
iterations = 1000

# score weighting; default values are 1, 0.25, 0.75
through_route_weighting = 1
permiablity_weighting = 0.25
+modal_filter_weighting = 0.75 + + +print("Functions loaded") + +for place in places: + print("Starting", place) + neighbourhoods, common_nodes_gdf, all_edges, walk_streets, drive_streets, boundary, all_streets, boundary_roads = define_neighbourhoods(place, return_all = True) + permiablity_metrics = neighbourhood_permeability(place, neighbourhoods, common_nodes_gdf, all_edges, walk_streets, drive_streets) + modal_filter_metrics, modal_filters = get_modal_filters(place, neighbourhoods, boundary, all_streets) + rat_run_metrics, rat_runs, drive_edges_gdf = get_rat_runs(place, neighbourhoods) + scored_neighbourhoods = score_neighbourhoods(modal_filter_metrics, permiablity_metrics, rat_run_metrics) + maps = create_webmaps(modal_filters, scored_neighbourhoods, rat_runs, boundary_roads, drive_edges_gdf) + print("Finished", place) + + + diff --git a/remote_desktop/ltn_scoring_3_12_3_mass_process.ipynb b/remote_desktop/ltn_scoring_3_12_3_mass_process.ipynb new file mode 100644 index 0000000..82de89d --- /dev/null +++ b/remote_desktop/ltn_scoring_3_12_3_mass_process.ipynb @@ -0,0 +1,4554 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Welcome!\n", + "\n", + "This notebook will allow for the detection of modal filters, rat runs, and analysis of neighbourhood accessiablity within a single notebook. The output of this code is a set of neighbourhoods scored on their plausiablity to be a \"Low Traffic Neighbourhood\", which is written to a geopackage. To run this code you will need the OS Open Roads dataset available on the OS website: https://osdatahub.os.uk/downloads/open/OpenRoads" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### General set up\n", + "Import libraries, set location etc." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "### set up python\n", + "## Library imports\n", + "import geopandas as gpd\n", + "import osmnx as ox\n", + "import networkx as nx\n", + "import momepy\n", + "import matplotlib.pyplot as plt\n", + "import folium\n", + "import pandas as pd\n", + "import overpy\n", + "from shapely.geometry import LineString\n", + "from shapely.geometry import Point\n", + "import requests\n", + "from shapely.geometry import MultiPolygon\n", + "from shapely.geometry import Polygon\n", + "import statistics\n", + "from shapely.ops import unary_union\n", + "import random\n", + "import overpy\n", + "import os \n", + "import math\n", + "from itertools import count\n", + "from collections import Counter\n", + "from sklearn.cluster import KMeans\n", + "#from osmnx._errors import InsufficientResponseError\n", + "from owslib.wms import WebMapService\n", + "from rasterio.mask import mask as rio_mask \n", + "from rasterio.features import shapes\n", + "from shapely.geometry import shape, mapping\n", + "from rasterio.io import MemoryFile\n", + "import numpy as np\n", + "from shapely.ops import unary_union\n", + "import warnings\n", + "from shapely.errors import ShapelyDeprecationWarning\n", + "\n", + "\n", + "\n", + "## Mute warnings\n", + "warnings.simplefilter(action='ignore', category=FutureWarning)\n", + "warnings.simplefilter(action='ignore', category=pd.errors.SettingWithCopyWarning)\n", + "warnings.simplefilter(action='ignore', category=ShapelyDeprecationWarning)\n", + "warnings.simplefilter(action='ignore', category=UserWarning)\n", + "\n", + "\n", + "\n", + "## Update settings\n", + "# update osmnx settings\n", + "useful_tags_ways = ox.settings.useful_tags_way + ['cycleway'] + ['bicycle'] + ['motor_vehicle'] + ['railway'] + ['tunnel'] + ['barrier'] + ['bus'] + ['access'] + ['oneway'] + ['oneway:bicycle'] + ['covered'] + ['waterway']\n", + "ox.config(use_cache=True, \n", + " 
log_console=True,\n", + " useful_tags_way=useful_tags_ways\n", + " )\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Read in place names\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "List of places read from test.txt:\n", + "City of Chester, United Kingdom\n" + ] + } + ], + "source": [ + "# Define the path to your text file\n", + "file_path = r'C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\data\\test.txt'\n", + "\n", + "# Initialize an empty list to store the lines\n", + "places = []\n", + "\n", + "# Open the file and read each line\n", + "with open(file_path, 'r') as file:\n", + " for line in file:\n", + " # Strip the newline character and any surrounding whitespace\n", + " place = line.strip()\n", + " # Append the line to the list\n", + " places.append(place)\n", + "\n", + "# Print the list of places\n", + "print(\"List of places read from test.txt:\")\n", + "for place in places:\n", + " print(place)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set iterations parameter" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "iterations = 1000" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Read in OS Roads\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def get_OS_roads():\n", + " \"\"\"\n", + " Reads in OS Open Road data from a GeoPackage file.\n", + "\n", + " Returns:\n", + " os_open_roads (GeoDataFrame): A GeoDataFrame containing road data.\n", + " \"\"\"\n", + " os_open_roads = gpd.read_file(r\"C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\data\\oproad_gpkg_gb\\Data\\oproad_roads_only.gpkg\")\n", + " return 
os_open_roads\n", + "\n", + "os_open_roads = get_OS_roads()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\b8008458\\AppData\\Local\\Temp\\ipykernel_37268\\3225741462.py:240: UserWarning: Geometry is in a geographic CRS. Results from 'buffer' are likely incorrect. Use 'GeoSeries.to_crs()' to re-project geometries to a projected CRS before this operation.\n", + "\n", + " gdf_buffered['geometry'] = gdf['geometry'].buffer(buffer_distance)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No data elements found for the specified location/tags.\n", + "No railway data found for 'light_rail'.\n", + "No railway data found for 'narrow_gauge'.\n", + "No railway data found for 'subway'.\n", + "No railway data found for 'tram'.\n", + "No data elements found for the specified location/tags.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\b8008458\\AppData\\Local\\Temp\\ipykernel_37268\\3225741462.py:719: FutureWarning: Currently, index_parts defaults to True, but in the future, it will default to False to be consistent with Pandas. Use `index_parts=True` to keep the current behavior and True/False to silence the warning.\n", + " erased_boundary_gdf = erased_boundary_gdf.explode()\n", + "C:\\Users\\b8008458\\AppData\\Local\\Temp\\ipykernel_37268\\3225741462.py:829: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " neighbourhoods = filter_neighbourhoods_by_roads(neighbourhoods, os_open_roads_clip, 'geometry')\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3505: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " exec(code_obj, self.user_global_ns, self.user_ns)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No path found between 8300557 and 7761331. Skipping...\n", + "No path found between 8300557 and 7761328. Skipping...\n", + "No path found between 10010638 and 7761331. Skipping...\n", + "No path found between 10010638 and 7761328. Skipping...\n", + "No path found between 8300561 and 7761331. Skipping...\n", + "No path found between 8300561 and 7761328. Skipping...\n", + "No path found between 8300562 and 7761328. Skipping...\n", + "No path found between 8300563 and 7761331. Skipping...\n", + "No path found between 8300563 and 7761328. Skipping...\n", + "No path found between 8300569 and 7761331. Skipping...\n", + "No path found between 8300569 and 7761328. Skipping...\n", + "No path found between 8300576 and 7761331. Skipping...\n", + "No path found between 8300576 and 7761328. Skipping...\n", + "No path found between 9095200 and 7761331. 
Skipping...\n", + "No path found between 9095200 and 7761328. Skipping...\n", + "No path found between 8300578 and 7761328. Skipping...\n", + "No path found between 8300578 and 7761331. Skipping...\n", + "No path found between 9095201 and 7761331. Skipping...\n", + "No path found between 9095201 and 7761328. Skipping...\n", + "No path found between 2827743265 and 7761331. Skipping...\n", + "No path found between 2827743266 and 7761331. Skipping...\n", + "No path found between 2827743266 and 7761328. Skipping...\n", + "No path found between 3630145573 and 7761328. Skipping...\n", + "No path found between 3630145573 and 7761331. Skipping...\n", + "No path found between 3630145575 and 7761328. Skipping...\n", + "No path found between 3630145575 and 7761331. Skipping...\n", + "No path found between 8300589 and 7761328. Skipping...\n", + "No path found between 8300589 and 7761331. Skipping...\n", + "No path found between 3630145581 and 7761331. Skipping...\n", + "No path found between 8300592 and 7761331. Skipping...\n", + "No path found between 8300592 and 7761328. Skipping...\n", + "No path found between 3835715634 and 7761331. Skipping...\n", + "No path found between 3835715634 and 7761328. Skipping...\n", + "No path found between 9095222 and 7761331. Skipping...\n", + "No path found between 9095222 and 7761328. Skipping...\n", + "No path found between 8083511 and 7761331. Skipping...\n", + "No path found between 3065157690 and 7761331. Skipping...\n", + "No path found between 3065157690 and 7761328. Skipping...\n", + "No path found between 8300625 and 7761331. Skipping...\n", + "No path found between 8300625 and 7761328. Skipping...\n", + "No path found between 8300626 and 7761331. Skipping...\n", + "No path found between 8300626 and 7761328. Skipping...\n", + "No path found between 883132505 and 7761331. Skipping...\n", + "No path found between 883132505 and 7761328. Skipping...\n", + "No path found between 8300639 and 7761331. 
Skipping...\n", + "No path found between 8300639 and 7761328. Skipping...\n", + "No path found between 8300647 and 7761328. Skipping...\n", + "No path found between 8300647 and 7761331. Skipping...\n", + "No path found between 10012786 and 7761331. Skipping...\n", + "No path found between 10012786 and 7761328. Skipping...\n", + "No path found between 9465982 and 7761328. Skipping...\n", + "No path found between 9465982 and 7761331. Skipping...\n", + "No path found between 9465987 and 7761328. Skipping...\n", + "No path found between 9465987 and 7761331. Skipping...\n", + "No path found between 9465990 and 7761328. Skipping...\n", + "No path found between 9465990 and 7761331. Skipping...\n", + "No path found between 9095311 and 7761331. Skipping...\n", + "No path found between 9095311 and 7761328. Skipping...\n", + "No path found between 1297723547 and 7761331. Skipping...\n", + "No path found between 8091815 and 7761328. Skipping...\n", + "No path found between 8091815 and 7761331. Skipping...\n", + "No path found between 8091816 and 7761328. Skipping...\n", + "No path found between 8091817 and 7761331. Skipping...\n", + "No path found between 8091817 and 7761328. Skipping...\n", + "No path found between 1691283624 and 7761328. Skipping...\n", + "No path found between 8091823 and 7761331. Skipping...\n", + "No path found between 8091839 and 7761328. Skipping...\n", + "No path found between 8091839 and 7761331. Skipping...\n", + "No path found between 8117586120 and 7761328. Skipping...\n", + "No path found between 8117586120 and 7761331. Skipping...\n", + "No path found between 10010831 and 7761328. Skipping...\n", + "No path found between 601045205 and 7761331. Skipping...\n", + "No path found between 601045205 and 7761328. Skipping...\n", + "No path found between 7379176 and 7761331. Skipping...\n", + "No path found between 7379176 and 7761328. Skipping...\n", + "No path found between 7379187 and 7761328. 
Skipping...\n", + "No path found between 7379187 and 7761331. Skipping...\n", + "No path found between 7379188 and 7761331. Skipping...\n", + "No path found between 7379188 and 7761328. Skipping...\n", + "No path found between 7379189 and 7761328. Skipping...\n", + "No path found between 7379189 and 7761331. Skipping...\n", + "No path found between 7379190 and 7761328. Skipping...\n", + "No path found between 7379190 and 7761331. Skipping...\n", + "No path found between 7379191 and 7761331. Skipping...\n", + "No path found between 7379191 and 7761328. Skipping...\n", + "No path found between 7379192 and 7761331. Skipping...\n", + "No path found between 7379194 and 7761331. Skipping...\n", + "No path found between 7379194 and 7761328. Skipping...\n", + "No path found between 26120444 and 7761328. Skipping...\n", + "No path found between 26120444 and 7761331. Skipping...\n", + "No path found between 364811 and 7761331. Skipping...\n", + "No path found between 364811 and 7761328. Skipping...\n", + "No path found between 364814 and 7761328. Skipping...\n", + "No path found between 364815 and 7761331. Skipping...\n", + "No path found between 364815 and 7761328. Skipping...\n", + "No path found between 364819 and 7761331. Skipping...\n", + "No path found between 364819 and 7761328. Skipping...\n", + "No path found between 364822 and 7761328. Skipping...\n", + "No path found between 4763054359 and 7761328. Skipping...\n", + "No path found between 4763054359 and 7761331. Skipping...\n", + "No path found between 364828 and 7761331. Skipping...\n", + "No path found between 364828 and 7761328. Skipping...\n", + "No path found between 9376034 and 7761328. Skipping...\n", + "No path found between 9376034 and 7761331. Skipping...\n", + "No path found between 9376036 and 7761328. Skipping...\n", + "No path found between 9376036 and 7761331. Skipping...\n", + "No path found between 364839 and 7761331. Skipping...\n", + "No path found between 364839 and 7761328. 
Skipping...\n", + "No path found between 606427435 and 7761331. Skipping...\n", + "No path found between 606427435 and 7761328. Skipping...\n", + "No path found between 364848 and 7761328. Skipping...\n", + "No path found between 364851 and 7761328. Skipping...\n", + "No path found between 364851 and 7761331. Skipping...\n", + "No path found between 7638526280 and 7761328. Skipping...\n", + "No path found between 7638526280 and 7761331. Skipping...\n", + "No path found between 7638526281 and 7761331. Skipping...\n", + "No path found between 7638526281 and 7761328. Skipping...\n", + "No path found between 2121251151 and 7761331. Skipping...\n", + "No path found between 2121251151 and 7761328. Skipping...\n", + "No path found between 364880 and 7761328. Skipping...\n", + "No path found between 364880 and 7761331. Skipping...\n", + "No path found between 364884 and 7761328. Skipping...\n", + "No path found between 364884 and 7761331. Skipping...\n", + "No path found between 8837239127 and 7761331. Skipping...\n", + "No path found between 8837239127 and 7761328. Skipping...\n", + "No path found between 2773541209 and 7761331. Skipping...\n", + "No path found between 2773541209 and 7761328. Skipping...\n", + "No path found between 8837239133 and 7761331. Skipping...\n", + "No path found between 8837239137 and 7761331. Skipping...\n", + "No path found between 8837239138 and 7761331. Skipping...\n", + "No path found between 8837239142 and 7761331. Skipping...\n", + "No path found between 8837239142 and 7761328. Skipping...\n", + "No path found between 364908 and 7761328. Skipping...\n", + "No path found between 364908 and 7761331. Skipping...\n", + "No path found between 8837239148 and 7761331. Skipping...\n", + "No path found between 8837239148 and 7761328. Skipping...\n", + "No path found between 364910 and 7761331. Skipping...\n", + "No path found between 364910 and 7761328. Skipping...\n", + "No path found between 364914 and 7761328. 
Skipping...\n", + "No path found between 364914 and 7761331. Skipping...\n", + "No path found between 2773541234 and 7761328. Skipping...\n", + "No path found between 2773541234 and 7761331. Skipping...\n", + "No path found between 364919 and 7761331. Skipping...\n", + "No path found between 364919 and 7761328. Skipping...\n", + "No path found between 364927 and 7761328. Skipping...\n", + "No path found between 364927 and 7761331. Skipping...\n", + "No path found between 364946 and 7761331. Skipping...\n", + "No path found between 364946 and 7761328. Skipping...\n", + "No path found between 364950 and 7761331. Skipping...\n", + "No path found between 364950 and 7761328. Skipping...\n", + "No path found between 364958 and 7761328. Skipping...\n", + "No path found between 364958 and 7761331. Skipping...\n", + "No path found between 364972 and 7761328. Skipping...\n", + "No path found between 364972 and 7761331. Skipping...\n", + "No path found between 7193004 and 7761328. Skipping...\n", + "No path found between 7193005 and 7761331. Skipping...\n", + "No path found between 7193005 and 7761328. Skipping...\n", + "No path found between 364975 and 7761331. Skipping...\n", + "No path found between 364975 and 7761328. Skipping...\n", + "No path found between 364977 and 7761328. Skipping...\n", + "No path found between 7193010 and 7761328. Skipping...\n", + "No path found between 7193010 and 7761331. Skipping...\n", + "No path found between 7193011 and 7761331. Skipping...\n", + "No path found between 7193011 and 7761328. Skipping...\n", + "No path found between 7193012 and 7761331. Skipping...\n", + "No path found between 7193012 and 7761328. Skipping...\n", + "No path found between 364982 and 7761331. Skipping...\n", + "No path found between 364982 and 7761328. Skipping...\n", + "No path found between 364983 and 7761331. Skipping...\n", + "No path found between 364983 and 7761328. Skipping...\n", + "No path found between 2015910328 and 7761331. 
Skipping...\n", + "No path found between 2015910328 and 7761328. Skipping...\n", + "No path found between 364995 and 7761328. Skipping...\n", + "No path found between 364995 and 7761331. Skipping...\n", + "No path found between 7193044 and 7761331. Skipping...\n", + "No path found between 7193044 and 7761328. Skipping...\n", + "No path found between 1808857 and 7761331. Skipping...\n", + "No path found between 1808857 and 7761328. Skipping...\n", + "No path found between 9484770 and 7761328. Skipping...\n", + "No path found between 9484770 and 7761331. Skipping...\n", + "No path found between 9484774 and 7761328. Skipping...\n", + "No path found between 9484774 and 7761331. Skipping...\n", + "No path found between 9484775 and 7761331. Skipping...\n", + "No path found between 9484775 and 7761328. Skipping...\n", + "No path found between 392690157 and 7761331. Skipping...\n", + "No path found between 392690157 and 7761328. Skipping...\n", + "No path found between 7193071 and 7761331. Skipping...\n", + "No path found between 7193071 and 7761328. Skipping...\n", + "No path found between 7193073 and 7761328. Skipping...\n", + "No path found between 7193073 and 7761331. Skipping...\n", + "No path found between 7193075 and 7761328. Skipping...\n", + "No path found between 7193075 and 7761331. Skipping...\n", + "No path found between 7193076 and 7761331. Skipping...\n", + "No path found between 7193077 and 7761328. Skipping...\n", + "No path found between 7193077 and 7761331. Skipping...\n", + "No path found between 7193079 and 7761331. Skipping...\n", + "No path found between 7193082 and 7761328. Skipping...\n", + "No path found between 7193082 and 7761331. Skipping...\n", + "No path found between 7193083 and 7761331. Skipping...\n", + "No path found between 7193084 and 7761328. Skipping...\n", + "No path found between 9484796 and 7761328. Skipping...\n", + "No path found between 3068135939 and 7761328. Skipping...\n", + "No path found between 3068135939 and 7761331. 
Skipping...\n", + "No path found between 9484808 and 7761331. Skipping...\n", + "No path found between 9484808 and 7761328. Skipping...\n", + "No path found between 9484810 and 7761328. Skipping...\n", + "No path found between 9484810 and 7761331. Skipping...\n", + "No path found between 7193104 and 7761328. Skipping...\n", + "No path found between 7193106 and 7761331. Skipping...\n", + "No path found between 7193106 and 7761328. Skipping...\n", + "No path found between 7193110 and 7761331. Skipping...\n", + "No path found between 7193110 and 7761328. Skipping...\n", + "No path found between 365081 and 7761328. Skipping...\n", + "No path found between 365081 and 7761331. Skipping...\n", + "No path found between 7193113 and 7761331. Skipping...\n", + "No path found between 7193113 and 7761328. Skipping...\n", + "No path found between 7193115 and 7761328. Skipping...\n", + "No path found between 7193115 and 7761331. Skipping...\n", + "No path found between 7193116 and 7761331. Skipping...\n", + "No path found between 9484826 and 7761331. Skipping...\n", + "No path found between 9484826 and 7761328. Skipping...\n", + "No path found between 365087 and 7761328. Skipping...\n", + "No path found between 365087 and 7761331. Skipping...\n", + "No path found between 1334612513 and 7761331. Skipping...\n", + "No path found between 1334612513 and 7761328. Skipping...\n", + "No path found between 1782387239 and 7761328. Skipping...\n", + "No path found between 8300995112 and 7761328. Skipping...\n", + "No path found between 8300995112 and 7761331. Skipping...\n", + "No path found between 10854962 and 7761331. Skipping...\n", + "No path found between 10854963 and 7761331. Skipping...\n", + "No path found between 10854963 and 7761328. Skipping...\n", + "No path found between 3065580105 and 7761328. Skipping...\n", + "No path found between 3065580105 and 7761331. Skipping...\n", + "No path found between 403796568 and 7761331. 
Skipping...\n", + "No path found between 403796568 and 7761328. Skipping...\n", + "No path found between 403796569 and 7761328. Skipping...\n", + "No path found between 403796569 and 7761331. Skipping...\n", + "No path found between 392690268 and 7761328. Skipping...\n", + "No path found between 392690268 and 7761331. Skipping...\n", + "No path found between 10855007 and 7761328. Skipping...\n", + "No path found between 10855007 and 7761331. Skipping...\n", + "No path found between 10855008 and 7761328. Skipping...\n", + "No path found between 10855008 and 7761331. Skipping...\n", + "No path found between 10855009 and 7761331. Skipping...\n", + "No path found between 10855009 and 7761328. Skipping...\n", + "No path found between 583510634 and 7761331. Skipping...\n", + "No path found between 583510634 and 7761328. Skipping...\n", + "No path found between 7577946730 and 7761331. Skipping...\n", + "No path found between 7577946730 and 7761328. Skipping...\n", + "No path found between 7577946732 and 7761331. Skipping...\n", + "No path found between 7577946732 and 7761328. Skipping...\n", + "No path found between 9484910 and 7761331. Skipping...\n", + "No path found between 9484910 and 7761328. Skipping...\n", + "No path found between 9484916 and 7761331. Skipping...\n", + "No path found between 9484916 and 7761328. Skipping...\n", + "No path found between 9484917 and 7761331. Skipping...\n", + "No path found between 9484917 and 7761328. Skipping...\n", + "No path found between 9484918 and 7761328. Skipping...\n", + "No path found between 9484918 and 7761331. Skipping...\n", + "No path found between 9484919 and 7761328. Skipping...\n", + "No path found between 9484920 and 7761331. Skipping...\n", + "No path found between 9484920 and 7761328. Skipping...\n", + "No path found between 9484921 and 7761331. Skipping...\n", + "No path found between 9659658885 and 7761331. Skipping...\n", + "No path found between 9659658885 and 7761328. 
Skipping...\n", + "No path found between 9659658886 and 7761328. Skipping...\n", + "No path found between 9659658886 and 7761331. Skipping...\n", + "No path found between 3065582227 and 7761328. Skipping...\n", + "No path found between 8086196 and 7761331. Skipping...\n", + "No path found between 8086196 and 7761328. Skipping...\n", + "No path found between 2015910583 and 7761331. Skipping...\n", + "No path found between 2015910583 and 7761328. Skipping...\n", + "No path found between 8335801015 and 7761328. Skipping...\n", + "No path found between 8335801015 and 7761331. Skipping...\n", + "No path found between 8335801016 and 7761331. Skipping...\n", + "No path found between 8335801016 and 7761328. Skipping...\n", + "No path found between 392690370 and 7761328. Skipping...\n", + "No path found between 392690370 and 7761331. Skipping...\n", + "No path found between 2831569613 and 7761331. Skipping...\n", + "No path found between 2831569613 and 7761328. Skipping...\n", + "No path found between 10382032 and 7761328. Skipping...\n", + "No path found between 10382032 and 7761331. Skipping...\n", + "No path found between 10382038 and 7761331. Skipping...\n", + "No path found between 10382038 and 7761328. Skipping...\n", + "No path found between 10382039 and 7761331. Skipping...\n", + "No path found between 10382040 and 7761328. Skipping...\n", + "No path found between 10382040 and 7761331. Skipping...\n", + "No path found between 10382041 and 7761331. Skipping...\n", + "No path found between 10382041 and 7761328. Skipping...\n", + "No path found between 2831569625 and 7761331. Skipping...\n", + "No path found between 10810076 and 7761328. Skipping...\n", + "No path found between 10810076 and 7761331. Skipping...\n", + "No path found between 7661470430 and 7761328. Skipping...\n", + "No path found between 7661470430 and 7761331. Skipping...\n", + "No path found between 10810079 and 7761331. Skipping...\n", + "No path found between 10810079 and 7761328. 
Skipping...\n", + "No path found between 7661470431 and 7761331. Skipping...\n", + "No path found between 10810082 and 7761331. Skipping...\n", + "No path found between 10810082 and 7761328. Skipping...\n", + "No path found between 10810087 and 7761331. Skipping...\n", + "No path found between 10810087 and 7761328. Skipping...\n", + "No path found between 10810088 and 7761331. Skipping...\n", + "No path found between 10810088 and 7761328. Skipping...\n", + "No path found between 392690407 and 7761328. Skipping...\n", + "No path found between 392690407 and 7761331. Skipping...\n", + "No path found between 392690408 and 7761328. Skipping...\n", + "No path found between 392690408 and 7761331. Skipping...\n", + "No path found between 10382061 and 7761331. Skipping...\n", + "No path found between 10382061 and 7761328. Skipping...\n", + "No path found between 10382062 and 7761328. Skipping...\n", + "No path found between 10382063 and 7761331. Skipping...\n", + "No path found between 10382063 and 7761328. Skipping...\n", + "No path found between 10382064 and 7761331. Skipping...\n", + "No path found between 10382064 and 7761328. Skipping...\n", + "No path found between 10382065 and 7761331. Skipping...\n", + "No path found between 10382065 and 7761328. Skipping...\n", + "No path found between 8545315565 and 7761331. Skipping...\n", + "No path found between 8545315565 and 7761328. Skipping...\n", + "No path found between 10382070 and 7761328. Skipping...\n", + "No path found between 10382070 and 7761331. Skipping...\n", + "No path found between 1708795 and 7761328. Skipping...\n", + "No path found between 540031740 and 7761331. Skipping...\n", + "No path found between 540031740 and 7761328. Skipping...\n", + "No path found between 10382079 and 7761331. Skipping...\n", + "No path found between 10382079 and 7761328. Skipping...\n", + "No path found between 10382080 and 7761328. Skipping...\n", + "No path found between 10382080 and 7761331. 
Skipping...\n", + "No path found between 1708802 and 7761331. Skipping...\n", + "No path found between 1708802 and 7761328. Skipping...\n", + "No path found between 10382084 and 7761331. Skipping...\n", + "No path found between 10382084 and 7761328. Skipping...\n", + "No path found between 10382088 and 7761328. Skipping...\n", + "No path found between 10382088 and 7761331. Skipping...\n", + "No path found between 10382089 and 7761328. Skipping...\n", + "No path found between 10382089 and 7761331. Skipping...\n", + "No path found between 1708811 and 7761328. Skipping...\n", + "No path found between 1708811 and 7761331. Skipping...\n", + "No path found between 1708814 and 7761331. Skipping...\n", + "No path found between 1708814 and 7761328. Skipping...\n", + "No path found between 1708815 and 7761328. Skipping...\n", + "No path found between 1708815 and 7761331. Skipping...\n", + "No path found between 10382097 and 7761331. Skipping...\n", + "No path found between 10382106 and 7761331. Skipping...\n", + "No path found between 10382106 and 7761328. Skipping...\n", + "No path found between 1708827 and 7761328. Skipping...\n", + "No path found between 1708827 and 7761331. Skipping...\n", + "No path found between 10382107 and 7761331. Skipping...\n", + "No path found between 10382107 and 7761328. Skipping...\n", + "No path found between 1708829 and 7761328. Skipping...\n", + "No path found between 1708829 and 7761331. Skipping...\n", + "No path found between 1708832 and 7761328. Skipping...\n", + "No path found between 1708832 and 7761331. Skipping...\n", + "No path found between 10382118 and 7761331. Skipping...\n", + "No path found between 10382118 and 7761328. Skipping...\n", + "No path found between 10382121 and 7761328. Skipping...\n", + "No path found between 10382121 and 7761331. Skipping...\n", + "No path found between 4311659306 and 7761328. Skipping...\n", + "No path found between 4311659306 and 7761331. 
Skipping...\n", + "No path found between 10382125 and 7761328. Skipping...\n", + "No path found between 1708846 and 7761328. Skipping...\n", + "No path found between 1708846 and 7761331. Skipping...\n", + "No path found between 10382126 and 7761328. Skipping...\n", + "No path found between 10382127 and 7761331. Skipping...\n", + "No path found between 10382127 and 7761328. Skipping...\n", + "No path found between 4311659314 and 7761331. Skipping...\n", + "No path found between 4311659314 and 7761328. Skipping...\n", + "No path found between 4311659315 and 7761328. Skipping...\n", + "No path found between 1708856 and 7761331. Skipping...\n", + "No path found between 1708856 and 7761328. Skipping...\n", + "No path found between 532497213 and 7761331. Skipping...\n", + "No path found between 532497213 and 7761328. Skipping...\n", + "No path found between 1708866 and 7761328. Skipping...\n", + "No path found between 1708866 and 7761331. Skipping...\n", + "No path found between 1708867 and 7761328. Skipping...\n", + "No path found between 10382148 and 7761328. Skipping...\n", + "No path found between 5239629 and 7761328. Skipping...\n", + "No path found between 5239631 and 7761328. Skipping...\n", + "No path found between 5239631 and 7761331. Skipping...\n", + "No path found between 869010256 and 7761328. Skipping...\n", + "No path found between 869010256 and 7761331. Skipping...\n", + "No path found between 5239632 and 7761328. Skipping...\n", + "No path found between 5239632 and 7761331. Skipping...\n", + "No path found between 5239633 and 7761331. Skipping...\n", + "No path found between 5239633 and 7761328. Skipping...\n", + "No path found between 1708882 and 7761328. Skipping...\n", + "No path found between 5239635 and 7761331. Skipping...\n", + "No path found between 5239635 and 7761328. Skipping...\n", + "No path found between 10382165 and 7761331. Skipping...\n", + "No path found between 10382165 and 7761328. 
Skipping...\n", + "No path found between 10382166 and 7761331. Skipping...\n", + "No path found between 392690520 and 7761331. Skipping...\n", + "No path found between 392690520 and 7761328. Skipping...\n", + "No path found between 392690521 and 7761328. Skipping...\n", + "No path found between 392690521 and 7761331. Skipping...\n", + "No path found between 5239639 and 7761328. Skipping...\n", + "No path found between 5239639 and 7761331. Skipping...\n", + "No path found between 5239640 and 7761331. Skipping...\n", + "No path found between 5239640 and 7761328. Skipping...\n", + "No path found between 5239642 and 7761331. Skipping...\n", + "No path found between 10382173 and 7761331. Skipping...\n", + "No path found between 10382173 and 7761328. Skipping...\n", + "No path found between 5239644 and 7761331. Skipping...\n", + "No path found between 5239644 and 7761328. Skipping...\n", + "No path found between 5239646 and 7761331. Skipping...\n", + "No path found between 5239646 and 7761328. Skipping...\n", + "No path found between 10382175 and 7761328. Skipping...\n", + "No path found between 10382175 and 7761331. Skipping...\n", + "No path found between 10382176 and 7761331. Skipping...\n", + "No path found between 10382177 and 7761328. Skipping...\n", + "No path found between 10382177 and 7761331. Skipping...\n", + "No path found between 10382178 and 7761328. Skipping...\n", + "No path found between 10382178 and 7761331. Skipping...\n", + "No path found between 5573028709 and 7761328. Skipping...\n", + "No path found between 5573028709 and 7761331. Skipping...\n", + "No path found between 5573028710 and 7761328. Skipping...\n", + "No path found between 5573028710 and 7761331. Skipping...\n", + "No path found between 365418 and 7761328. Skipping...\n", + "No path found between 365418 and 7761331. Skipping...\n", + "No path found between 365446 and 7761331. Skipping...\n", + "No path found between 365446 and 7761328. 
Skipping...\n", + "No path found between 365451 and 7761331. Skipping...\n", + "No path found between 10382227 and 7761328. Skipping...\n", + "No path found between 10382227 and 7761331. Skipping...\n", + "No path found between 10382228 and 7761328. Skipping...\n", + "No path found between 10382228 and 7761331. Skipping...\n", + "No path found between 10382229 and 7761331. Skipping...\n", + "No path found between 10382229 and 7761328. Skipping...\n", + "No path found between 10382233 and 7761331. Skipping...\n", + "No path found between 10382233 and 7761328. Skipping...\n", + "No path found between 10382246 and 7761331. Skipping...\n", + "No path found between 10382246 and 7761328. Skipping...\n", + "No path found between 9675088807 and 7761328. Skipping...\n", + "No path found between 9675088807 and 7761331. Skipping...\n", + "No path found between 1375728592 and 7761328. Skipping...\n", + "No path found between 1375728592 and 7761331. Skipping...\n", + "No path found between 10382160 and 7761331. Skipping...\n", + "No path found between 7636462571 and 7761331. Skipping...\n", + "No path found between 7636462571 and 7761328. Skipping...\n", + "No path found between 10382161 and 7761328. Skipping...\n", + "No path found between 10382161 and 7761331. Skipping...\n", + "No path found between 365552 and 7761331. Skipping...\n", + "No path found between 365552 and 7761328. Skipping...\n", + "No path found between 365553 and 7761331. Skipping...\n", + "No path found between 3068464115 and 7761328. Skipping...\n", + "No path found between 3068464115 and 7761331. Skipping...\n", + "No path found between 3065158643 and 7761328. Skipping...\n", + "No path found between 3065158643 and 7761331. Skipping...\n", + "No path found between 10382162 and 7761331. Skipping...\n", + "No path found between 365561 and 7761331. Skipping...\n", + "No path found between 365561 and 7761328. Skipping...\n", + "No path found between 10382164 and 7761331. 
Skipping...\n", + "No path found between 10382164 and 7761328. Skipping...\n", + "No path found between 10214402 and 7761328. Skipping...\n", + "No path found between 10214402 and 7761331. Skipping...\n", + "No path found between 365571 and 7761331. Skipping...\n", + "No path found between 365571 and 7761328. Skipping...\n", + "No path found between 10214405 and 7761328. Skipping...\n", + "No path found between 10214405 and 7761331. Skipping...\n", + "No path found between 8784903 and 7761328. Skipping...\n", + "No path found between 8784903 and 7761331. Skipping...\n", + "No path found between 8784904 and 7761331. Skipping...\n", + "No path found between 8784904 and 7761328. Skipping...\n", + "No path found between 1375732743 and 7761331. Skipping...\n", + "No path found between 8784911 and 7761328. Skipping...\n", + "No path found between 8784911 and 7761331. Skipping...\n", + "No path found between 365586 and 7761331. Skipping...\n", + "No path found between 365586 and 7761328. Skipping...\n", + "No path found between 3065154579 and 7761328. Skipping...\n", + "No path found between 3065154579 and 7761331. Skipping...\n", + "No path found between 365590 and 7761331. Skipping...\n", + "No path found between 365590 and 7761328. Skipping...\n", + "No path found between 5585806360 and 7761331. Skipping...\n", + "No path found between 5585806360 and 7761328. Skipping...\n", + "No path found between 365601 and 7761328. Skipping...\n", + "No path found between 365601 and 7761331. Skipping...\n", + "No path found between 413910054 and 7761331. Skipping...\n", + "No path found between 3065158694 and 7761328. Skipping...\n", + "No path found between 3065158694 and 7761331. Skipping...\n", + "No path found between 10214445 and 7761328. Skipping...\n", + "No path found between 10214445 and 7761331. Skipping...\n", + "No path found between 8784943 and 7761331. Skipping...\n", + "No path found between 8784943 and 7761328. 
Skipping...\n", + "No path found between 10214447 and 7761328. Skipping...\n", + "No path found between 10214447 and 7761331. Skipping...\n", + "No path found between 365617 and 7761328. Skipping...\n", + "No path found between 365617 and 7761331. Skipping...\n", + "No path found between 8784945 and 7761328. Skipping...\n", + "No path found between 8784947 and 7761331. Skipping...\n", + "No path found between 8784947 and 7761328. Skipping...\n", + "No path found between 8784948 and 7761328. Skipping...\n", + "No path found between 8784948 and 7761331. Skipping...\n", + "No path found between 10214448 and 7761331. Skipping...\n", + "No path found between 8784950 and 7761331. Skipping...\n", + "No path found between 8784950 and 7761328. Skipping...\n", + "No path found between 10214449 and 7761328. Skipping...\n", + "No path found between 10214449 and 7761331. Skipping...\n", + "No path found between 10214450 and 7761331. Skipping...\n", + "No path found between 10214450 and 7761328. Skipping...\n", + "No path found between 7760960 and 7761328. Skipping...\n", + "No path found between 7760960 and 7761331. Skipping...\n", + "No path found between 7760961 and 7761328. Skipping...\n", + "No path found between 7760962 and 7761328. Skipping...\n", + "No path found between 7760962 and 7761331. Skipping...\n", + "No path found between 5574194240 and 7761331. Skipping...\n", + "No path found between 7760965 and 7761328. Skipping...\n", + "No path found between 7760965 and 7761331. Skipping...\n", + "No path found between 7760976 and 7761331. Skipping...\n", + "No path found between 7760976 and 7761328. Skipping...\n", + "No path found between 2619903057 and 7761328. Skipping...\n", + "No path found between 2619903057 and 7761331. Skipping...\n", + "No path found between 8328537170 and 7761328. Skipping...\n", + "No path found between 8328537170 and 7761331. Skipping...\n", + "No path found between 8328537173 and 7761331. 
Skipping...\n", + "No path found between 8328537173 and 7761328. Skipping...\n", + "No path found between 7760983 and 7761328. Skipping...\n", + "No path found between 7760983 and 7761331. Skipping...\n", + "No path found between 5574194267 and 7761328. Skipping...\n", + "No path found between 5574194267 and 7761331. Skipping...\n", + "No path found between 7760988 and 7761331. Skipping...\n", + "No path found between 7760988 and 7761328. Skipping...\n", + "No path found between 8784989 and 7761328. Skipping...\n", + "No path found between 8784989 and 7761331. Skipping...\n", + "No path found between 5574194268 and 7761331. Skipping...\n", + "No path found between 5574194268 and 7761328. Skipping...\n", + "No path found between 5574194269 and 7761328. Skipping...\n", + "No path found between 5574194269 and 7761331. Skipping...\n", + "No path found between 5574194272 and 7761328. Skipping...\n", + "No path found between 5574194272 and 7761331. Skipping...\n", + "No path found between 7760994 and 7761331. Skipping...\n", + "No path found between 7760994 and 7761328. Skipping...\n", + "No path found between 8334436 and 7761331. Skipping...\n", + "No path found between 8334436 and 7761328. Skipping...\n", + "No path found between 7760997 and 7761328. Skipping...\n", + "No path found between 7760997 and 7761331. Skipping...\n", + "No path found between 8784998 and 7761331. Skipping...\n", + "No path found between 8784998 and 7761328. Skipping...\n", + "No path found between 5574194278 and 7761331. Skipping...\n", + "No path found between 5574194278 and 7761328. Skipping...\n", + "No path found between 7761000 and 7761331. Skipping...\n", + "No path found between 7761000 and 7761328. Skipping...\n", + "No path found between 10214504 and 7761328. Skipping...\n", + "No path found between 10214504 and 7761331. Skipping...\n", + "No path found between 10929256 and 7761328. Skipping...\n", + "No path found between 10929256 and 7761331. 
Skipping...\n", + "No path found between 8334443 and 7761331. Skipping...\n", + "No path found between 8334443 and 7761328. Skipping...\n", + "No path found between 10929257 and 7761328. Skipping...\n", + "No path found between 10929257 and 7761331. Skipping...\n", + "No path found between 8334445 and 7761331. Skipping...\n", + "No path found between 8334445 and 7761328. Skipping...\n", + "No path found between 7761006 and 7761328. Skipping...\n", + "No path found between 7761006 and 7761331. Skipping...\n", + "No path found between 7761010 and 7761328. Skipping...\n", + "No path found between 8334451 and 7761331. Skipping...\n", + "No path found between 8334451 and 7761328. Skipping...\n", + "No path found between 4760747122 and 7761328. Skipping...\n", + "No path found between 4760747122 and 7761331. Skipping...\n", + "No path found between 9348213 and 7761328. Skipping...\n", + "No path found between 9348213 and 7761331. Skipping...\n", + "No path found between 1685990538 and 7761331. Skipping...\n", + "No path found between 9348240 and 7761331. Skipping...\n", + "No path found between 9348241 and 7761328. Skipping...\n", + "No path found between 9348241 and 7761331. Skipping...\n", + "No path found between 9348242 and 7761328. Skipping...\n", + "No path found between 9348242 and 7761331. Skipping...\n", + "No path found between 9442870419 and 7761331. Skipping...\n", + "No path found between 9442870419 and 7761328. Skipping...\n", + "No path found between 9348244 and 7761328. Skipping...\n", + "No path found between 9348246 and 7761331. Skipping...\n", + "No path found between 9348246 and 7761328. Skipping...\n", + "No path found between 8785049 and 7761331. Skipping...\n", + "No path found between 8785049 and 7761328. Skipping...\n", + "No path found between 1685990555 and 7761331. Skipping...\n", + "No path found between 1685990555 and 7761328. Skipping...\n", + "No path found between 7761052 and 7761328. 
Skipping...\n", + "No path found between 7761052 and 7761331. Skipping...\n", + "No path found between 7761054 and 7761328. Skipping...\n", + "No path found between 7761054 and 7761331. Skipping...\n", + "No path found between 1685990560 and 7761328. Skipping...\n", + "No path found between 1685990560 and 7761331. Skipping...\n", + "No path found between 9348258 and 7761331. Skipping...\n", + "No path found between 1685990563 and 7761328. Skipping...\n", + "No path found between 1685990563 and 7761331. Skipping...\n", + "No path found between 1685990564 and 7761328. Skipping...\n", + "No path found between 1685990564 and 7761331. Skipping...\n", + "No path found between 7761061 and 7761331. Skipping...\n", + "No path found between 7761061 and 7761328. Skipping...\n", + "No path found between 7761062 and 7761328. Skipping...\n", + "No path found between 7761062 and 7761331. Skipping...\n", + "No path found between 2431480994 and 7761328. Skipping...\n", + "No path found between 2431480994 and 7761331. Skipping...\n", + "No path found between 2431481015 and 7761328. Skipping...\n", + "No path found between 2431481015 and 7761331. Skipping...\n", + "No path found between 2431481018 and 7761331. Skipping...\n", + "No path found between 2431481018 and 7761328. Skipping...\n", + "No path found between 7761086 and 7761328. Skipping...\n", + "No path found between 7761086 and 7761331. Skipping...\n", + "No path found between 8334527 and 7761331. Skipping...\n", + "No path found between 8334527 and 7761328. Skipping...\n", + "No path found between 2431481029 and 7761328. Skipping...\n", + "No path found between 9348295 and 7761328. Skipping...\n", + "No path found between 9348295 and 7761331. Skipping...\n", + "No path found between 2431481032 and 7761328. Skipping...\n", + "No path found between 2431481032 and 7761331. Skipping...\n", + "No path found between 10980563 and 7761328. Skipping...\n", + "No path found between 10980563 and 7761331. 
Skipping...\n", + "No path found between 10980564 and 7761328. Skipping...\n", + "No path found between 10980564 and 7761331. Skipping...\n", + "No path found between 10980566 and 7761331. Skipping...\n", + "No path found between 10980566 and 7761328. Skipping...\n", + "No path found between 10980568 and 7761331. Skipping...\n", + "No path found between 10980569 and 7761328. Skipping...\n", + "No path found between 10980569 and 7761331. Skipping...\n", + "No path found between 10980571 and 7761331. Skipping...\n", + "No path found between 10980571 and 7761328. Skipping...\n", + "No path found between 8117585116 and 7761328. Skipping...\n", + "No path found between 8117585116 and 7761331. Skipping...\n", + "No path found between 3065156829 and 7761331. Skipping...\n", + "No path found between 3065156829 and 7761328. Skipping...\n", + "No path found between 10980574 and 7761331. Skipping...\n", + "No path found between 10980574 and 7761328. Skipping...\n", + "No path found between 7761119 and 7761328. Skipping...\n", + "No path found between 10980575 and 7761331. Skipping...\n", + "No path found between 10980575 and 7761328. Skipping...\n", + "No path found between 10980576 and 7761328. Skipping...\n", + "No path found between 10980576 and 7761331. Skipping...\n", + "No path found between 10980578 and 7761328. Skipping...\n", + "No path found between 10980578 and 7761331. Skipping...\n", + "No path found between 10980596 and 7761331. Skipping...\n", + "No path found between 10980596 and 7761328. Skipping...\n", + "No path found between 10980597 and 7761328. Skipping...\n", + "No path found between 10980597 and 7761331. Skipping...\n", + "No path found between 10980603 and 7761328. Skipping...\n", + "No path found between 10980603 and 7761331. Skipping...\n", + "No path found between 10980612 and 7761328. Skipping...\n", + "No path found between 10980613 and 7761328. Skipping...\n", + "No path found between 10980613 and 7761331. 
Skipping...\n", + "No path found between 10980615 and 7761331. Skipping...\n", + "No path found between 582823188 and 7761331. Skipping...\n", + "No path found between 582823188 and 7761328. Skipping...\n", + "No path found between 10980629 and 7761328. Skipping...\n", + "No path found between 10980629 and 7761331. Skipping...\n", + "No path found between 2839133463 and 7761331. Skipping...\n", + "No path found between 2839133463 and 7761328. Skipping...\n", + "No path found between 7761176 and 7761331. Skipping...\n", + "No path found between 7761176 and 7761328. Skipping...\n", + "No path found between 2839133464 and 7761331. Skipping...\n", + "No path found between 2839133464 and 7761328. Skipping...\n", + "No path found between 4337327385 and 7761328. Skipping...\n", + "No path found between 4337327385 and 7761331. Skipping...\n", + "No path found between 583243035 and 7761328. Skipping...\n", + "No path found between 583243035 and 7761331. Skipping...\n", + "No path found between 4337327387 and 7761328. Skipping...\n", + "No path found between 10980640 and 7761328. Skipping...\n", + "No path found between 10980640 and 7761331. Skipping...\n", + "No path found between 1256985889 and 7761328. Skipping...\n", + "No path found between 1256985889 and 7761331. Skipping...\n", + "No path found between 7761186 and 7761328. Skipping...\n", + "No path found between 7761186 and 7761331. Skipping...\n", + "No path found between 4336198950 and 7761328. Skipping...\n", + "No path found between 4336198950 and 7761331. Skipping...\n", + "No path found between 4136572202 and 7761328. Skipping...\n", + "No path found between 4136572202 and 7761331. Skipping...\n", + "No path found between 583474475 and 7761328. Skipping...\n", + "No path found between 583474475 and 7761331. Skipping...\n", + "No path found between 8300540 and 7761331. Skipping...\n", + "No path found between 32126253 and 7761328. Skipping...\n", + "No path found between 32126253 and 7761331. 
Skipping...\n", + "No path found between 7761198 and 7761328. Skipping...\n", + "No path found between 7761198 and 7761331. Skipping...\n", + "No path found between 4136572206 and 7761328. Skipping...\n", + "No path found between 4136572206 and 7761331. Skipping...\n", + "No path found between 4136572207 and 7761331. Skipping...\n", + "No path found between 4136572207 and 7761328. Skipping...\n", + "No path found between 4136572210 and 7761328. Skipping...\n", + "No path found between 4136572210 and 7761331. Skipping...\n", + "No path found between 7761206 and 7761328. Skipping...\n", + "No path found between 7761206 and 7761331. Skipping...\n", + "No path found between 583474488 and 7761331. Skipping...\n", + "No path found between 583474488 and 7761328. Skipping...\n", + "No path found between 7761211 and 7761328. Skipping...\n", + "No path found between 7761211 and 7761331. Skipping...\n", + "No path found between 7761218 and 7761328. Skipping...\n", + "No path found between 7761218 and 7761331. Skipping...\n", + "No path found between 7761229 and 7761328. Skipping...\n", + "No path found between 7761229 and 7761331. Skipping...\n", + "No path found between 7761237 and 7761331. Skipping...\n", + "No path found between 7761239 and 7761331. Skipping...\n", + "No path found between 7761239 and 7761328. Skipping...\n", + "No path found between 7761240 and 7761328. Skipping...\n", + "No path found between 7761240 and 7761331. Skipping...\n", + "No path found between 7761244 and 7761328. Skipping...\n", + "No path found between 7761244 and 7761331. Skipping...\n", + "No path found between 2434915678 and 7761328. Skipping...\n", + "No path found between 2434915678 and 7761331. Skipping...\n", + "No path found between 7761248 and 7761328. Skipping...\n", + "No path found between 7761254 and 7761328. Skipping...\n", + "No path found between 7761254 and 7761331. Skipping...\n", + "No path found between 7761257 and 7761331. 
Skipping...\n", + "No path found between 7761257 and 7761328. Skipping...\n", + "No path found between 7761258 and 7761328. Skipping...\n", + "No path found between 7761258 and 7761331. Skipping...\n", + "No path found between 7761260 and 7761328. Skipping...\n", + "No path found between 7761261 and 7761331. Skipping...\n", + "No path found between 7761261 and 7761328. Skipping...\n", + "No path found between 7761262 and 7761328. Skipping...\n", + "No path found between 7761262 and 7761331. Skipping...\n", + "No path found between 7761263 and 7761328. Skipping...\n", + "No path found between 7761263 and 7761331. Skipping...\n", + "No path found between 7761266 and 7761331. Skipping...\n", + "No path found between 7761266 and 7761328. Skipping...\n", + "No path found between 7761267 and 7761328. Skipping...\n", + "No path found between 7761267 and 7761331. Skipping...\n", + "No path found between 7761268 and 7761331. Skipping...\n", + "No path found between 7761268 and 7761328. Skipping...\n", + "No path found between 10925428 and 7761328. Skipping...\n", + "No path found between 10925430 and 7761328. Skipping...\n", + "No path found between 10925430 and 7761331. Skipping...\n", + "No path found between 7761272 and 7761331. Skipping...\n", + "No path found between 10925432 and 7761331. Skipping...\n", + "No path found between 10925432 and 7761328. Skipping...\n", + "No path found between 7761274 and 7761328. Skipping...\n", + "No path found between 7761274 and 7761331. Skipping...\n", + "No path found between 10925433 and 7761331. Skipping...\n", + "No path found between 10925433 and 7761328. Skipping...\n", + "No path found between 7073150 and 7761328. Skipping...\n", + "No path found between 7761278 and 7761328. Skipping...\n", + "No path found between 7761278 and 7761331. Skipping...\n", + "No path found between 7761280 and 7761328. Skipping...\n", + "No path found between 7761280 and 7761331. Skipping...\n", + "No path found between 10925438 and 7761331. 
Skipping...\n", + "No path found between 10925438 and 7761328. Skipping...\n", + "No path found between 7761282 and 7761331. Skipping...\n", + "No path found between 7761282 and 7761328. Skipping...\n", + "No path found between 7761283 and 7761328. Skipping...\n", + "No path found between 7761283 and 7761331. Skipping...\n", + "No path found between 10925442 and 7761331. Skipping...\n", + "No path found between 10925442 and 7761328. Skipping...\n", + "No path found between 7761285 and 7761328. Skipping...\n", + "No path found between 10925444 and 7761331. Skipping...\n", + "No path found between 10925444 and 7761328. Skipping...\n", + "No path found between 10925446 and 7761331. Skipping...\n", + "No path found between 7761289 and 7761328. Skipping...\n", + "No path found between 7761289 and 7761331. Skipping...\n", + "No path found between 7761292 and 7761328. Skipping...\n", + "No path found between 7761298 and 7761331. Skipping...\n", + "No path found between 7761298 and 7761328. Skipping...\n", + "No path found between 7761301 and 7761331. Skipping...\n", + "No path found between 7761301 and 7761328. Skipping...\n", + "No path found between 7761304 and 7761331. Skipping...\n", + "No path found between 7761304 and 7761328. Skipping...\n", + "No path found between 7761306 and 7761331. Skipping...\n", + "No path found between 7761306 and 7761328. Skipping...\n", + "No path found between 3068468634 and 7761328. Skipping...\n", + "No path found between 3068468634 and 7761331. Skipping...\n", + "No path found between 7761310 and 7761328. Skipping...\n", + "No path found between 7761310 and 7761331. Skipping...\n", + "No path found between 7761312 and 7761328. Skipping...\n", + "No path found between 7761312 and 7761331. Skipping...\n", + "No path found between 7761315 and 7761328. Skipping...\n", + "No path found between 7761315 and 7761331. Skipping...\n", + "No path found between 1182840231 and 7761331. Skipping...\n", + "No path found between 7761323 and 7761328. 
Skipping...\n", + "No path found between 7761324 and 10382127. Skipping...\n", + "No path found between 7761324 and 7761457. Skipping...\n", + "No path found between 7761324 and 9682561. Skipping...\n", + "No path found between 7761324 and 8784945. Skipping...\n", + "No path found between 7761324 and 4763054359. Skipping...\n", + "No path found between 7761324 and 540031740. Skipping...\n", + "No path found between 7761324 and 1685990538. Skipping...\n", + "No path found between 7761324 and 7193116. Skipping...\n", + "No path found between 7761324 and 7638526281. Skipping...\n", + "No path found between 7761324 and 583487076. Skipping...\n", + "No path found between 7761324 and 4760747122. Skipping...\n", + "No path found between 7761324 and 1685990560. Skipping...\n", + "No path found between 7761324 and 8837239142. Skipping...\n", + "No path found between 7761324 and 601045205. Skipping...\n", + "No path found between 7761324 and 413910054. Skipping...\n", + "No path found between 7761324 and 7761257. Skipping...\n", + "No path found between 7761324 and 7760994. Skipping...\n", + "No path found between 7761324 and 5585806360. Skipping...\n", + "No path found between 7761324 and 8334451. Skipping...\n", + "No path found between 7761324 and 366268. Skipping...\n", + "No path found between 7761324 and 7761266. Skipping...\n", + "No path found between 7761324 and 3630145581. Skipping...\n", + "No path found between 7761324 and 7135049. Skipping...\n", + "No path found between 7761324 and 392690268. Skipping...\n", + "No path found between 7761324 and 5683384271. Skipping...\n", + "No path found between 7761324 and 7761282. Skipping...\n", + "No path found between 7761324 and 10214445. Skipping...\n", + "No path found between 7761324 and 7193083. Skipping...\n", + "No path found between 7761324 and 9348258. Skipping...\n", + "No path found between 7761324 and 1256985889. Skipping...\n", + "No path found between 7761324 and 364908. 
Skipping...\n", + "No path found between 7761324 and 7761176. Skipping...\n", + "No path found between 7761324 and 10854962. Skipping...\n", + "No path found between 7761324 and 364983. Skipping...\n", + "No path found between 7761324 and 1334612513. Skipping...\n", + "No path found between 7761324 and 7761312. Skipping...\n", + "No path found between 7761324 and 7761447. Skipping...\n", + "No path found between 7761324 and 10980578. Skipping...\n", + "No path found between 7761324 and 7761054. Skipping...\n", + "No path found between 7761324 and 5239635. Skipping...\n", + "No path found between 7761324 and 7193076. Skipping...\n", + "No path found between 7761324 and 1708829. Skipping...\n", + "No path found between 7761324 and 8335801016. Skipping...\n", + "No path found between 7761324 and 10012786. Skipping...\n", + "No path found between 7761324 and 697481080. Skipping...\n", + "No path found between 7761324 and 11703558140. Skipping...\n", + "No path found between 7761324 and 9095311. Skipping...\n", + "No path found between 7761324 and 2015910583. Skipping...\n", + "No path found between 7761324 and 1685990564. Skipping...\n", + "No path found between 7761324 and 2773541234. Skipping...\n", + "No path found between 7761324 and 364975. Skipping...\n", + "No path found between 7761324 and 366239. Skipping...\n", + "No path found between 7761324 and 9006793. Skipping...\n", + "No path found between 7761324 and 5239631. Skipping...\n", + "No path found between 7761324 and 7911101. Skipping...\n", + "No path found between 7761324 and 6991775. Skipping...\n", + "No path found between 7761324 and 7761119. Skipping...\n", + "No path found between 7761324 and 1326263755. Skipping...\n", + "No path found between 7761324 and 9348244. Skipping...\n", + "No path found between 7761324 and 3064952812. Skipping...\n", + "No path found between 7761324 and 9348242. Skipping...\n", + "No path found between 7761324 and 7761000. 
Skipping...\n", + "No path found between 7761324 and 9484916. Skipping...\n", + "No path found between 7761324 and 10214405. Skipping...\n", + "No path found between 7761324 and 9012907. Skipping...\n", + "No path found between 7761324 and 8300569. Skipping...\n", + "No path found between 7761324 and 225816087. Skipping...\n", + "No path found between 7761324 and 7379188. Skipping...\n", + "No path found between 7761324 and 8091817. Skipping...\n", + "No path found between 7761324 and 7135051. Skipping...\n", + "No path found between 7761324 and 364819. Skipping...\n", + "No path found between 7761324 and 225816119. Skipping...\n", + "No path found between 7761324 and 10382246. Skipping...\n", + "No path found between 7761324 and 7761450. Skipping...\n", + "No path found between 7761324 and 7761261. Skipping...\n", + "No path found between 7761324 and 883132505. Skipping...\n", + "No path found between 7761324 and 364963. Skipping...\n", + "No path found between 7761324 and 869010256. Skipping...\n", + "No path found between 7761324 and 9012909. Skipping...\n", + "No path found between 7761324 and 366249. Skipping...\n", + "No path found between 7761324 and 7193104. Skipping...\n", + "No path found between 7761324 and 7761254. Skipping...\n", + "No path found between 7761324 and 8784911. Skipping...\n", + "No path found between 7761324 and 8091816. Skipping...\n", + "No path found between 7761324 and 1808857. Skipping...\n", + "No path found between 7761324 and 4337327385. Skipping...\n", + "No path found between 7761324 and 364848. Skipping...\n", + "No path found between 7761324 and 10925539. Skipping...\n", + "No path found between 7761324 and 9659658885. Skipping...\n", + "No path found between 7761324 and 9484920. Skipping...\n", + "No path found between 7761324 and 9682690. Skipping...\n", + "No path found between 7761324 and 7761289. Skipping...\n", + "No path found between 7761324 and 366472. Skipping...\n", + "No path found between 7761324 and 7761240. 
Skipping...\n", + "No path found between 7761324 and 10382173. Skipping...\n", + "No path found between 7761324 and 10382164. Skipping...\n", + "No path found between 7761324 and 1685990555. Skipping...\n", + "No path found between 7761324 and 225816230. Skipping...\n", + "No path found between 7761324 and 10382084. Skipping...\n", + "No path found between 7761324 and 366482. Skipping...\n", + "No path found between 7761324 and 7761301. Skipping...\n", + "No path found between 7761324 and 8300562. Skipping...\n", + "No path found between 7761324 and 7134940. Skipping...\n", + "No path found between 7761324 and 10382125. Skipping...\n", + "No path found between 7761324 and 10382079. Skipping...\n", + "No path found between 7761324 and 2431481018. Skipping...\n", + "No path found between 7761324 and 10382107. Skipping...\n", + "No path found between 7761324 and 10810079. Skipping...\n", + "No path found between 7761324 and 1708832. Skipping...\n", + "No path found between 7761324 and 10382040. Skipping...\n", + "No path found between 7761324 and 1708814. Skipping...\n", + "No path found between 7761324 and 364914. Skipping...\n", + "No path found between 7761324 and 9795545. Skipping...\n", + "No path found between 7761324 and 9012908. Skipping...\n", + "No path found between 7761324 and 5239642. Skipping...\n", + "No path found between 7761324 and 10980576. Skipping...\n", + "No path found between 7761324 and 5239629. Skipping...\n", + "No path found between 7761324 and 8784989. Skipping...\n", + "No path found between 7761324 and 364927. Skipping...\n", + "No path found between 7761324 and 10214449. Skipping...\n", + "No path found between 7761324 and 8117585116. Skipping...\n", + "No path found between 7761324 and 9484917. Skipping...\n", + "No path found between 7761324 and 4136572210. Skipping...\n", + "No path found between 7761324 and 10382038. Skipping...\n", + "No path found between 7761324 and 9348295. 
Skipping...\n", + "No path found between 7761324 and 9095201. Skipping...\n", + "No path found between 7761324 and 9795469. Skipping...\n", + "No path found between 7761324 and 9465990. Skipping...\n", + "No path found between 7761324 and 10382097. Skipping...\n", + "No path found between 7761324 and 365617. Skipping...\n", + "No path found between 7761324 and 8784947. Skipping...\n", + "No path found between 7761324 and 8784950. Skipping...\n", + "No path found between 7761324 and 9465987. Skipping...\n", + "No path found between 7761324 and 8083511. Skipping...\n", + "No path found between 7761324 and 6991772. Skipping...\n", + "No path found between 7761324 and 403796568. Skipping...\n", + "No path found between 7761324 and 366267. Skipping...\n", + "No path found between 7761324 and 10382166. Skipping...\n", + "No path found between 7761324 and 10382177. Skipping...\n", + "No path found between 7761324 and 1708795. Skipping...\n", + "No path found between 7761324 and 3065155088. Skipping...\n", + "No path found between 7761324 and 3064952754. Skipping...\n", + "No path found between 7761324 and 8328537173. Skipping...\n", + "No path found between 7761324 and 9484774. Skipping...\n", + "No path found between 7761324 and 10925432. Skipping...\n", + "No path found between 7761324 and 7761331. Skipping...\n", + "No path found between 7761324 and 8091823. Skipping...\n", + "No path found between 7761324 and 364950. Skipping...\n", + "No path found between 7761324 and 364822. Skipping...\n", + "No path found between 7761324 and 9006790. Skipping...\n", + "No path found between 7761324 and 7761086. Skipping...\n", + "No path found between 7761324 and 8300647. Skipping...\n", + "No path found between 7761324 and 10810088. Skipping...\n", + "No path found between 7761324 and 1345248899. Skipping...\n", + "No path found between 7761324 and 364814. Skipping...\n", + "No path found between 7761324 and 6991780. Skipping...\n", + "No path found between 7761324 and 9682692. 
Skipping...\n", + "No path found between 7761324 and 5574194272. Skipping...\n", + "No path found between 7761324 and 8784903. Skipping...\n", + "No path found between 7761324 and 392690157. Skipping...\n", + "No path found between 7761324 and 10382148. Skipping...\n", + "No path found between 7761324 and 7661470431. Skipping...\n", + "No path found between 7761324 and 1309433558. Skipping...\n", + "No path found between 7761324 and 225816169. Skipping...\n", + "No path found between 7761324 and 9682592. Skipping...\n", + "No path found between 7761324 and 32126253. Skipping...\n", + "No path found between 7761324 and 366202. Skipping...\n", + "No path found between 7761324 and 7760983. Skipping...\n", + "No path found between 7761324 and 8300625. Skipping...\n", + "No path found between 7761324 and 4136572207. Skipping...\n", + "No path found between 7761324 and 5573028710. Skipping...\n", + "No path found between 7761324 and 365087. Skipping...\n", + "No path found between 7761324 and 364880. Skipping...\n", + "No path found between 7761324 and 8091839. Skipping...\n", + "No path found between 7761324 and 10980613. Skipping...\n", + "No path found between 7761324 and 9484808. Skipping...\n", + "No path found between 7761324 and 7379176. Skipping...\n", + "No path found between 7761324 and 5239644. Skipping...\n", + "No path found between 7761324 and 392690408. Skipping...\n", + "No path found between 7761324 and 7134999. Skipping...\n", + "No path found between 7761324 and 10382178. Skipping...\n", + "No path found between 7761324 and 364946. Skipping...\n", + "No path found between 7761324 and 10980603. Skipping...\n", + "No path found between 7761324 and 7638526280. Skipping...\n", + "No path found between 7761324 and 3065154579. Skipping...\n", + "No path found between 7761324 and 7911098. Skipping...\n", + "No path found between 7761324 and 366466. Skipping...\n", + "No path found between 7761324 and 9095222. 
Skipping...\n", + "No path found between 7761324 and 2831569625. Skipping...\n", + "No path found between 7761324 and 3065155346. Skipping...\n", + "No path found between 7761324 and 7761349. Skipping...\n", + "No path found between 7761324 and 5239632. Skipping...\n", + "No path found between 7761324 and 7577946730. Skipping...\n", + "No path found between 7761324 and 10382080. Skipping...\n", + "No path found between 7761324 and 3065158694. Skipping...\n", + "No path found between 7761324 and 8334443. Skipping...\n", + "No path found between 7761324 and 10214448. Skipping...\n", + "No path found between 7761324 and 1708867. Skipping...\n", + "No path found between 7761324 and 3068464115. Skipping...\n", + "No path found between 7761324 and 9012918. Skipping...\n", + "No path found between 7761324 and 8087110. Skipping...\n", + "No path found between 7761324 and 7193011. Skipping...\n", + "No path found between 7761324 and 7760976. Skipping...\n", + "No path found between 7761324 and 366461. Skipping...\n", + "No path found between 7761324 and 7761328. Skipping...\n", + "No path found between 7761324 and 7761304. Skipping...\n", + "No path found between 7761324 and 11290430. Skipping...\n", + "No path found between 7761324 and 10382088. Skipping...\n", + "No path found between 7761324 and 7760962. Skipping...\n", + "No path found between 7761324 and 9348241. Skipping...\n", + "No path found between 7761324 and 26120444. Skipping...\n", + "No path found between 7761324 and 11703558139. Skipping...\n", + "No path found between 7761324 and 9484796. Skipping...\n", + "No path found between 7761324 and 7135044. Skipping...\n", + "No path found between 7761324 and 1708827. Skipping...\n", + "No path found between 7761324 and 151215655. Skipping...\n", + "No path found between 7761324 and 8334527. Skipping...\n", + "No path found between 7761324 and 7761239. Skipping...\n", + "No path found between 7761324 and 10010831. 
Skipping...\n", + "No path found between 7761324 and 5574194269. Skipping...\n", + "No path found between 7761324 and 7193075. Skipping...\n", + "No path found between 7761324 and 3065580105. Skipping...\n", + "No path found between 7761324 and 10382165. Skipping...\n", + "No path found between 7761324 and 366257. Skipping...\n", + "No path found between 7761324 and 7661470430. Skipping...\n", + "No path found between 7761324 and 365571. Skipping...\n", + "No path found between 7761324 and 10980568. Skipping...\n", + "No path found between 7761324 and 583474475. Skipping...\n", + "No path found between 7761324 and 10382126. Skipping...\n", + "No path found between 7761324 and 9682560. Skipping...\n", + "No path found between 7761324 and 10855008. Skipping...\n", + "No path found between 7761324 and 7193073. Skipping...\n", + "No path found between 7761324 and 7379189. Skipping...\n", + "No path found between 7761324 and 5239646. Skipping...\n", + "No path found between 7761324 and 9095145. Skipping...\n", + "No path found between 7761324 and 7761310. Skipping...\n", + "No path found between 7761324 and 9465982. Skipping...\n", + "No path found between 7761324 and 9682686. Skipping...\n", + "No path found between 7761324 and 364982. Skipping...\n", + "No path found between 7761324 and 8837239148. Skipping...\n", + "No path found between 7761324 and 10980566. Skipping...\n", + "No path found between 7761324 and 9006738. Skipping...\n", + "No path found between 7761324 and 10010830. Skipping...\n", + "No path found between 7761324 and 7761292. Skipping...\n", + "No path found between 7761324 and 364828. Skipping...\n", + "No path found between 7761324 and 10010638. Skipping...\n", + "No path found between 7761324 and 9484921. Skipping...\n", + "No path found between 7761324 and 365553. Skipping...\n", + "No path found between 7761324 and 9095144. Skipping...\n", + "No path found between 7761324 and 2431480994. 
Skipping...\n", + "No path found between 7761324 and 366484. Skipping...\n", + "No path found between 7761324 and 10925442. Skipping...\n", + "No path found between 7761324 and 9831959. Skipping...\n", + "No path found between 7761324 and 10980575. Skipping...\n", + "No path found between 7761324 and 10866203. Skipping...\n", + "No path found between 7761324 and 9795544. Skipping...\n", + "No path found between 7761324 and 364851. Skipping...\n", + "No path found between 7761324 and 10382063. Skipping...\n", + "No path found between 7761324 and 7193071. Skipping...\n", + "No path found between 7761324 and 7761186. Skipping...\n", + "No path found between 7761324 and 7761267. Skipping...\n", + "No path found between 7761324 and 3630145573. Skipping...\n", + "No path found between 7761324 and 7760997. Skipping...\n", + "No path found between 7761324 and 4336198950. Skipping...\n", + "No path found between 7761324 and 10810082. Skipping...\n", + "No path found between 7761324 and 9348246. Skipping...\n", + "No path found between 7761324 and 7760960. Skipping...\n", + "No path found between 7761324 and 10980571. Skipping...\n", + "No path found between 7761324 and 2431481032. Skipping...\n", + "No path found between 7761324 and 364910. Skipping...\n", + "No path found between 7761324 and 365418. Skipping...\n", + "No path found between 7761324 and 8335801015. Skipping...\n", + "No path found between 7761324 and 10980615. Skipping...\n", + "No path found between 7761324 and 9682457. Skipping...\n", + "No path found between 7761324 and 7761248. Skipping...\n", + "No path found between 7761324 and 1375732743. Skipping...\n", + "No path found between 7761324 and 7193004. Skipping...\n", + "No path found between 7761324 and 9682693. Skipping...\n", + "No path found between 7761324 and 1685990563. Skipping...\n", + "No path found between 7761324 and 7761283. Skipping...\n", + "No path found between 7761324 and 5683384300. 
Skipping...\n", + "No path found between 7761324 and 582823188. Skipping...\n", + "No path found between 7761324 and 366468. Skipping...\n", + "No path found between 7761324 and 10382106. Skipping...\n", + "No path found between 7761324 and 7761285. Skipping...\n", + "No path found between 7761324 and 10810076. Skipping...\n", + "No path found between 7761324 and 9442870419. Skipping...\n", + "No path found between 7761324 and 7761061. Skipping...\n", + "No path found between 7761324 and 3068135939. Skipping...\n", + "No path found between 7761324 and 10382065. Skipping...\n", + "No path found between 7761324 and 365446. Skipping...\n", + "No path found between 7761324 and 8300626. Skipping...\n", + "No path found between 7761324 and 8784904. Skipping...\n", + "No path found between 7761324 and 7761062. Skipping...\n", + "No path found between 7761324 and 10382162. Skipping...\n", + "No path found between 7761324 and 10382070. Skipping...\n", + "No path found between 7761324 and 2839133464. Skipping...\n", + "No path found between 7761324 and 5574194278. Skipping...\n", + "No path found between 7761324 and 225816133. Skipping...\n", + "No path found between 7761324 and 3630145575. Skipping...\n", + "No path found between 7761324 and 2827743266. Skipping...\n", + "No path found between 7761324 and 10925446. Skipping...\n", + "No path found between 7761324 and 3065582227. Skipping...\n", + "No path found between 7761324 and 9006745. Skipping...\n", + "No path found between 7761324 and 7193005. Skipping...\n", + "No path found between 7761324 and 5574194268. Skipping...\n", + "No path found between 7761324 and 7135031. Skipping...\n", + "No path found between 7761324 and 10980596. Skipping...\n", + "No path found between 7761324 and 7636462571. Skipping...\n", + "No path found between 7761324 and 4311659314. Skipping...\n", + "No path found between 7761324 and 10980640. Skipping...\n", + "No path found between 7761324 and 8785049. 
Skipping...\n", + "No path found between 7761324 and 366254. Skipping...\n", + "No path found between 7761324 and 366201. Skipping...\n", + "No path found between 7761324 and 403887715. Skipping...\n", + "No path found between 7761324 and 8087109. Skipping...\n", + "No path found between 7761324 and 7761206. Skipping...\n", + "No path found between 7761324 and 7761262. Skipping...\n", + "No path found between 7761324 and 8837239137. Skipping...\n", + "No path found between 7761324 and 7761280. Skipping...\n", + "No path found between 7761324 and 364919. Skipping...\n", + "No path found between 7761324 and 364995. Skipping...\n", + "No path found between 7761324 and 7135048. Skipping...\n", + "No path found between 7761324 and 9484775. Skipping...\n", + "No path found between 7761324 and 5683384269. Skipping...\n", + "No path found between 7761324 and 10810087. Skipping...\n", + "No path found between 7761324 and 8300589. Skipping...\n", + "No path found between 7761324 and 1375728592. Skipping...\n", + "No path found between 7761324 and 7761298. Skipping...\n", + "No path found between 7761324 and 225816042. Skipping...\n", + "No path found between 7761324 and 10382227. Skipping...\n", + "No path found between 7761324 and 7379194. Skipping...\n", + "No path found between 7761324 and 366247. Skipping...\n", + "No path found between 7761324 and 3065581208. Skipping...\n", + "No path found between 7761324 and 7193084. Skipping...\n", + "No path found between 7761324 and 7761315. Skipping...\n", + "No path found between 7761324 and 532497213. Skipping...\n", + "No path found between 7761324 and 392690520. Skipping...\n", + "No path found between 7761324 and 9682461. Skipping...\n", + "No path found between 7761324 and 364972. Skipping...\n", + "No path found between 7761324 and 2619903057. Skipping...\n", + "No path found between 7761324 and 7911171. Skipping...\n", + "No path found between 7761324 and 364884. 
Skipping...\n", + "No path found between 7761324 and 7193044. Skipping...\n", + "No path found between 7761324 and 10382039. Skipping...\n", + "No path found between 7761324 and 9682460. Skipping...\n", + "No path found between 7761324 and 366237. Skipping...\n", + "No path found between 7761324 and 2431481029. Skipping...\n", + "No path found between 7761324 and 583510634. Skipping...\n", + "No path found between 7761324 and 5573028709. Skipping...\n", + "No path found between 7761324 and 9376034. Skipping...\n", + "No path found between 7761324 and 3068468634. Skipping...\n", + "No path found between 7761324 and 10382175. Skipping...\n", + "No path found between 7761324 and 7911186. Skipping...\n", + "No path found between 7761324 and 7761278. Skipping...\n", + "No path found between 7761324 and 10382118. Skipping...\n", + "No path found between 7761324 and 1708856. Skipping...\n", + "No path found between 7761324 and 10925433. Skipping...\n", + "No path found between 7761324 and 10925430. Skipping...\n", + "No path found between 7761324 and 7193106. Skipping...\n", + "No path found between 7761324 and 10214450. Skipping...\n", + "No path found between 7761324 and 8300639. Skipping...\n", + "No path found between 7761324 and 7134941. Skipping...\n", + "No path found between 7761324 and 2831569613. Skipping...\n", + "No path found between 7761324 and 7760988. Skipping...\n", + "No path found between 7761324 and 9484770. Skipping...\n", + "No path found between 7761324 and 1297723547. Skipping...\n", + "No path found between 7761324 and 10382089. Skipping...\n", + "No path found between 7761324 and 7193077. Skipping...\n", + "No path found between 7761324 and 9095200. Skipping...\n", + "No path found between 7761324 and 365590. Skipping...\n", + "No path found between 7761324 and 7761229. Skipping...\n", + "No path found between 7761324 and 10925541. Skipping...\n", + "No path found between 7761324 and 364815. 
Skipping...\n", + "No path found between 7761324 and 10382062. Skipping...\n", + "No path found between 7761324 and 9795543. Skipping...\n", + "No path found between 7761324 and 7193012. Skipping...\n", + "No path found between 7761324 and 5574670103. Skipping...\n", + "No path found between 7761324 and 392690407. Skipping...\n", + "No path found between 7761324 and 9682458. Skipping...\n", + "No path found between 7761324 and 5683384259. Skipping...\n", + "No path found between 7761324 and 7379191. Skipping...\n", + "No path found between 7761324 and 7761351. Skipping...\n", + "No path found between 7761324 and 7761306. Skipping...\n", + "No path found between 7761324 and 2434915678. Skipping...\n", + "No path found between 7761324 and 1182840231. Skipping...\n", + "No path found between 7761324 and 7193115. Skipping...\n", + "No path found between 7761324 and 366277. Skipping...\n", + "No path found between 7761324 and 10382041. Skipping...\n", + "No path found between 7761324 and 7135043. Skipping...\n", + "No path found between 7761324 and 8300563. Skipping...\n", + "No path found between 7761324 and 364811. Skipping...\n", + "No path found between 7761324 and 9006764. Skipping...\n", + "No path found between 7761324 and 2985883608. Skipping...\n", + "No path found between 7761324 and 4311659315. Skipping...\n", + "No path found between 7761324 and 5239640. Skipping...\n", + "No path found between 7761324 and 7761274. Skipping...\n", + "No path found between 7761324 and 4337327387. Skipping...\n", + "No path found between 7761324 and 7155044293. Skipping...\n", + "No path found between 7761324 and 8300576. Skipping...\n", + "No path found between 7761324 and 10214504. Skipping...\n", + "No path found between 7761324 and 10382228. Skipping...\n", + "No path found between 7761324 and 4378048314. Skipping...\n", + "No path found between 7761324 and 1691283624. Skipping...\n", + "No path found between 7761324 and 7379190. 
Skipping...\n", + "No path found between 7761324 and 366262. Skipping...\n", + "No path found between 7761324 and 4760747125. Skipping...\n", + "No path found between 7761324 and 10382032. Skipping...\n", + "No path found between 7761324 and 7135035. Skipping...\n", + "No path found between 7761324 and 2839133463. Skipping...\n", + "No path found between 7761324 and 7761344. Skipping...\n", + "No path found between 7761324 and 9012920. Skipping...\n", + "No path found between 7761324 and 7761487. Skipping...\n", + "No path found between 7761324 and 365552. Skipping...\n", + "No path found between 7761324 and 1708815. Skipping...\n", + "No path found between 7761324 and 10980569. Skipping...\n", + "No path found between 7761324 and 7577946732. Skipping...\n", + "No path found between 7761324 and 8300995112. Skipping...\n", + "No path found between 7761324 and 366255. Skipping...\n", + "No path found between 7761324 and 392690370. Skipping...\n", + "No path found between 7761324 and 7073150. Skipping...\n", + "No path found between 7761324 and 8300578. Skipping...\n", + "No path found between 7761324 and 8300540. Skipping...\n", + "No path found between 7761324 and 10382176. Skipping...\n", + "No path found between 7761324 and 1782387239. Skipping...\n", + "No path found between 7761324 and 366198. Skipping...\n", + "No path found between 7761324 and 9659658886. Skipping...\n", + "No path found between 7761324 and 9484810. Skipping...\n", + "No path found between 7761324 and 10382160. Skipping...\n", + "No path found between 7761324 and 7761258. Skipping...\n", + "No path found between 7761324 and 9006762. Skipping...\n", + "No path found between 7761324 and 8300592. Skipping...\n", + "No path found between 7761324 and 10980574. Skipping...\n", + "No path found between 7761324 and 9795541. Skipping...\n", + "No path found between 7761324 and 7761263. Skipping...\n", + "No path found between 7761324 and 10010621. 
Skipping...\n", + "No path found between 7761324 and 3065157690. Skipping...\n", + "No path found between 7761324 and 10214447. Skipping...\n", + "No path found between 7761324 and 4311659306. Skipping...\n", + "No path found between 7761324 and 8784943. Skipping...\n", + "No path found between 7761324 and 319946667. Skipping...\n", + "No path found between 7761324 and 10925444. Skipping...\n", + "No path found between 7761324 and 7761244. Skipping...\n", + "No path found between 7761324 and 14378897. Skipping...\n", + "No path found between 7761324 and 10382061. Skipping...\n", + "No path found between 7761324 and 9484910. Skipping...\n", + "No path found between 7761324 and 10382233. Skipping...\n", + "No path found between 7761324 and 9682689. Skipping...\n", + "No path found between 7761324 and 3065158643. Skipping...\n", + "No path found between 7761324 and 7193113. Skipping...\n", + "No path found between 7761324 and 10980597. Skipping...\n", + "No path found between 7761324 and 10980612. Skipping...\n", + "No path found between 7761324 and 9006833. Skipping...\n", + "No path found between 7761324 and 1342125559. Skipping...\n", + "No path found between 7761324 and 225816237. Skipping...\n", + "No path found between 7761324 and 8086196. Skipping...\n", + "No path found between 7761324 and 7761268. Skipping...\n", + "No path found between 7761324 and 10010619. Skipping...\n", + "No path found between 7761324 and 7761460. Skipping...\n", + "No path found between 7761324 and 364977. Skipping...\n", + "No path found between 7761324 and 5239633. Skipping...\n", + "No path found between 7761324 and 366210. Skipping...\n", + "No path found between 7761324 and 2985883571. Skipping...\n", + "No path found between 7761324 and 8837239127. Skipping...\n", + "No path found between 7761324 and 10855009. Skipping...\n", + "No path found between 7761324 and 7911276. Skipping...\n", + "No path found between 7761324 and 8447477702. 
Skipping...\n", + "No path found between 7761324 and 3065156829. Skipping...\n", + "No path found between 7761324 and 9484918. Skipping...\n", + "No path found between 7761324 and 2300548965. Skipping...\n", + "No path found between 7761324 and 8334445. Skipping...\n", + "No path found between 7761324 and 841598831. Skipping...\n", + "No path found between 7761324 and 7761345. Skipping...\n", + "No path found between 7761324 and 7761350. Skipping...\n", + "No path found between 7761324 and 8078845. Skipping...\n", + "No path found between 7761324 and 8784998. Skipping...\n", + "No path found between 7761324 and 10929257. Skipping...\n", + "No path found between 7761324 and 1708866. Skipping...\n", + "No path found between 7761324 and 7193082. Skipping...\n", + "No path found between 7761324 and 5239639. Skipping...\n", + "No path found between 7761324 and 392690521. Skipping...\n", + "No path found between 7761324 and 7761462. Skipping...\n", + "No path found between 7761324 and 403796569. Skipping...\n", + "No path found between 7761324 and 2827743265. Skipping...\n", + "No path found between 7761324 and 9484826. Skipping...\n", + "No path found between 7761324 and 606427435. Skipping...\n", + "No path found between 7761324 and 7193010. Skipping...\n", + "No path found between 7761324 and 7761272. Skipping...\n", + "No path found between 7761324 and 11290426. Skipping...\n", + "No path found between 7761324 and 10382121. Skipping...\n", + "No path found between 7761324 and 8837239138. Skipping...\n", + "No path found between 7761324 and 2773541209. Skipping...\n", + "No path found between 7761324 and 7761260. Skipping...\n", + "No path found between 7761324 and 365451. Skipping...\n", + "No path found between 7761324 and 9682747. Skipping...\n", + "No path found between 7761324 and 9006736. Skipping...\n", + "No path found between 7761324 and 7761010. Skipping...\n", + "No path found between 7761324 and 9376036. 
Skipping...\n", + "No path found between 7761324 and 10980563. Skipping...\n", + "No path found between 7761324 and 9006789. Skipping...\n", + "No path found between 7761324 and 10854963. Skipping...\n", + "No path found between 7761324 and 7760961. Skipping...\n", + "No path found between 7761324 and 7379187. Skipping...\n", + "No path found between 7761324 and 4136572206. Skipping...\n", + "No path found between 7761324 and 9682598. Skipping...\n", + "No path found between 7761324 and 5683384294. Skipping...\n", + "No path found between 7761324 and 1708802. Skipping...\n", + "No path found between 7761324 and 366258. Skipping...\n", + "No path found between 7761324 and 10929256. Skipping...\n", + "No path found between 7761324 and 3835715634. Skipping...\n", + "No path found between 7761324 and 11703558141. Skipping...\n", + "No path found between 7761324 and 364958. Skipping...\n", + "No path found between 7761324 and 583474488. Skipping...\n", + "No path found between 7761324 and 9795477. Skipping...\n", + "No path found between 7761324 and 7911172. Skipping...\n", + "No path found between 7761324 and 9348213. Skipping...\n", + "No path found between 7761324 and 7761323. Skipping...\n", + "No path found between 7761324 and 9682459. Skipping...\n", + "No path found between 7761324 and 365601. Skipping...\n", + "No path found between 7761324 and 10855007. Skipping...\n", + "No path found between 7761324 and 365561. Skipping...\n", + "No path found between 7761324 and 10214402. Skipping...\n", + "No path found between 7761324 and 8334436. Skipping...\n", + "No path found between 7761324 and 583243035. Skipping...\n", + "No path found between 7761324 and 4136572202. Skipping...\n", + "No path found between 7761324 and 7135040. Skipping...\n", + "No path found between 7761324 and 8837239133. Skipping...\n", + "No path found between 7761324 and 9006815. Skipping...\n", + "No path found between 7761324 and 366264. 
Skipping...\n", + "No path found between 7761331 and 7761328. Skipping...\n", + "No path found between 7761344 and 7761328. Skipping...\n", + "No path found between 7761345 and 7761328. Skipping...\n", + "No path found between 7761345 and 7761331. Skipping...\n", + "No path found between 7761349 and 7761328. Skipping...\n", + "No path found between 7761349 and 7761331. Skipping...\n", + "No path found between 7761350 and 7761331. Skipping...\n", + "No path found between 7761350 and 7761328. Skipping...\n", + "No path found between 7761351 and 7761331. Skipping...\n", + "No path found between 7761351 and 7761328. Skipping...\n", + "No path found between 1326263755 and 7761331. Skipping...\n", + "No path found between 10925539 and 7761331. Skipping...\n", + "No path found between 10925539 and 7761328. Skipping...\n", + "No path found between 10925541 and 7761328. Skipping...\n", + "No path found between 10925541 and 7761331. Skipping...\n", + "No path found between 10010619 and 7761328. Skipping...\n", + "No path found between 10010619 and 7761331. Skipping...\n", + "No path found between 225816042 and 7761328. Skipping...\n", + "No path found between 225816042 and 7761331. Skipping...\n", + "No path found between 10010621 and 7761331. Skipping...\n", + "No path found between 1342125559 and 7761328. Skipping...\n", + "No path found between 1342125559 and 7761331. Skipping...\n", + "No path found between 8078845 and 7761331. Skipping...\n", + "No path found between 8078845 and 7761328. Skipping...\n", + "No path found between 9682444 and 7761331. Skipping...\n", + "No path found between 3065155088 and 7761328. Skipping...\n", + "No path found between 9831958 and 7761331. Skipping...\n", + "No path found between 9831958 and 7761328. Skipping...\n", + "No path found between 9831959 and 7761331. Skipping...\n", + "No path found between 9831959 and 7761328. Skipping...\n", + "No path found between 225816087 and 7761331. 
Skipping...\n", + "No path found between 225816087 and 7761328. Skipping...\n", + "No path found between 9682457 and 7761331. Skipping...\n", + "No path found between 9682458 and 7761331. Skipping...\n", + "No path found between 9682458 and 7761328. Skipping...\n", + "No path found between 9682459 and 7761328. Skipping...\n", + "No path found between 9682459 and 7761331. Skipping...\n", + "No path found between 9682460 and 7761328. Skipping...\n", + "No path found between 9682460 and 7761331. Skipping...\n", + "No path found between 9682461 and 7761331. Skipping...\n", + "No path found between 9682461 and 7761328. Skipping...\n", + "No path found between 10866203 and 7761328. Skipping...\n", + "No path found between 7761447 and 7761328. Skipping...\n", + "No path found between 151215655 and 7761331. Skipping...\n", + "No path found between 151215655 and 7761328. Skipping...\n", + "No path found between 7761450 and 7761328. Skipping...\n", + "No path found between 7761457 and 7761331. Skipping...\n", + "No path found between 7761457 and 7761328. Skipping...\n", + "No path found between 7761460 and 7761328. Skipping...\n", + "No path found between 7761460 and 7761331. Skipping...\n", + "No path found between 7761462 and 7761328. Skipping...\n", + "No path found between 7761462 and 7761331. Skipping...\n", + "No path found between 225816119 and 7761328. Skipping...\n", + "No path found between 225816119 and 7761331. Skipping...\n", + "No path found between 8087109 and 7761331. Skipping...\n", + "No path found between 8087109 and 7761328. Skipping...\n", + "No path found between 8087110 and 7761331. Skipping...\n", + "No path found between 8087110 and 7761328. Skipping...\n", + "No path found between 225816133 and 7761331. Skipping...\n", + "No path found between 225816133 and 7761328. Skipping...\n", + "No path found between 7761487 and 7761328. Skipping...\n", + "No path found between 3455331920 and 7761331. 
Skipping...\n", + "No path found between 3455331920 and 7761328. Skipping...\n", + "No path found between 7761489 and 7761331. Skipping...\n", + "No path found between 7761489 and 7761328. Skipping...\n", + "No path found between 403887715 and 7761328. Skipping...\n", + "No path found between 403887715 and 7761331. Skipping...\n", + "No path found between 583487076 and 7761331. Skipping...\n", + "No path found between 8087145 and 7761331. Skipping...\n", + "No path found between 8087145 and 7761328. Skipping...\n", + "No path found between 225816169 and 7761328. Skipping...\n", + "No path found between 366198 and 7761331. Skipping...\n", + "No path found between 366198 and 7761328. Skipping...\n", + "No path found between 366201 and 7761328. Skipping...\n", + "No path found between 366202 and 7761328. Skipping...\n", + "No path found between 366202 and 7761331. Skipping...\n", + "No path found between 9682560 and 7761331. Skipping...\n", + "No path found between 9682560 and 7761328. Skipping...\n", + "No path found between 9682561 and 7761328. Skipping...\n", + "No path found between 366210 and 7761328. Skipping...\n", + "No path found between 366210 and 7761331. Skipping...\n", + "No path found between 1345248899 and 7761328. Skipping...\n", + "No path found between 1345248899 and 7761331. Skipping...\n", + "No path found between 366215 and 7761331. Skipping...\n", + "No path found between 366215 and 7761328. Skipping...\n", + "No path found between 9006736 and 7761331. Skipping...\n", + "No path found between 9006736 and 7761328. Skipping...\n", + "No path found between 583958160 and 7761328. Skipping...\n", + "No path found between 583958160 and 7761331. Skipping...\n", + "No path found between 9006738 and 7761331. Skipping...\n", + "No path found between 366228 and 7761331. Skipping...\n", + "No path found between 366228 and 7761328. Skipping...\n", + "No path found between 3065581208 and 7761331. Skipping...\n", + "No path found between 3065581208 and 7761328. 
Skipping...\n", + "No path found between 9006745 and 7761328. Skipping...\n", + "No path found between 9006745 and 7761331. Skipping...\n", + "No path found between 366237 and 7761331. Skipping...\n", + "No path found between 366237 and 7761328. Skipping...\n", + "No path found between 366239 and 7761331. Skipping...\n", + "No path found between 9682592 and 7761331. Skipping...\n", + "No path found between 9682592 and 7761328. Skipping...\n", + "No path found between 366245 and 7761331. Skipping...\n", + "No path found between 366245 and 7761328. Skipping...\n", + "No path found between 9682598 and 7761331. Skipping...\n", + "No path found between 9682598 and 7761328. Skipping...\n", + "No path found between 366247 and 7761328. Skipping...\n", + "No path found between 225816230 and 7761328. Skipping...\n", + "No path found between 225816230 and 7761331. Skipping...\n", + "No path found between 366249 and 7761328. Skipping...\n", + "No path found between 366249 and 7761331. Skipping...\n", + "No path found between 9006762 and 7761331. Skipping...\n", + "No path found between 9006762 and 7761328. Skipping...\n", + "No path found between 9012907 and 7761328. Skipping...\n", + "No path found between 9012907 and 7761331. Skipping...\n", + "No path found between 9006764 and 7761328. Skipping...\n", + "No path found between 9006764 and 7761331. Skipping...\n", + "No path found between 9012908 and 7761331. Skipping...\n", + "No path found between 9012908 and 7761328. Skipping...\n", + "No path found between 366254 and 7761328. Skipping...\n", + "No path found between 366254 and 7761331. Skipping...\n", + "No path found between 366255 and 7761331. Skipping...\n", + "No path found between 366255 and 7761328. Skipping...\n", + "No path found between 9012909 and 7761328. Skipping...\n", + "No path found between 9012909 and 7761331. Skipping...\n", + "No path found between 366257 and 7761328. Skipping...\n", + "No path found between 366257 and 7761331. 
Skipping...\n", + "No path found between 366258 and 7761328. Skipping...\n", + "No path found between 366258 and 7761331. Skipping...\n", + "No path found between 225816237 and 7761331. Skipping...\n", + "No path found between 366262 and 7761331. Skipping...\n", + "No path found between 366262 and 7761328. Skipping...\n", + "No path found between 9012918 and 7761328. Skipping...\n", + "No path found between 9012918 and 7761331. Skipping...\n", + "No path found between 366264 and 7761328. Skipping...\n", + "No path found between 366264 and 7761331. Skipping...\n", + "No path found between 9012920 and 7761331. Skipping...\n", + "No path found between 9012920 and 7761328. Skipping...\n", + "No path found between 7911098 and 7761331. Skipping...\n", + "No path found between 366267 and 7761328. Skipping...\n", + "No path found between 366267 and 7761331. Skipping...\n", + "No path found between 366268 and 7761328. Skipping...\n", + "No path found between 366268 and 7761331. Skipping...\n", + "No path found between 7911101 and 7761328. Skipping...\n", + "No path found between 7911101 and 7761331. Skipping...\n", + "No path found between 1268702915 and 7761328. Skipping...\n", + "No path found between 1268702915 and 7761331. Skipping...\n", + "No path found between 366277 and 7761331. Skipping...\n", + "No path found between 366277 and 7761328. Skipping...\n", + "No path found between 9006789 and 7761328. Skipping...\n", + "No path found between 9006789 and 7761331. Skipping...\n", + "No path found between 9006790 and 7761331. Skipping...\n", + "No path found between 9006793 and 7761331. Skipping...\n", + "No path found between 9006793 and 7761328. Skipping...\n", + "No path found between 9006798 and 7761331. Skipping...\n", + "No path found between 9006798 and 7761328. Skipping...\n", + "No path found between 1309433558 and 7761331. Skipping...\n", + "No path found between 1309433558 and 7761328. Skipping...\n", + "No path found between 7134940 and 7761331. 
Skipping...\n", + "No path found between 7134940 and 7761328. Skipping...\n", + "No path found between 7134941 and 7761328. Skipping...\n", + "No path found between 7134941 and 7761331. Skipping...\n", + "No path found between 9006815 and 7761328. Skipping...\n", + "No path found between 9006815 and 7761331. Skipping...\n", + "No path found between 9006833 and 7761328. Skipping...\n", + "No path found between 9682686 and 7761328. Skipping...\n", + "No path found between 9682688 and 7761328. Skipping...\n", + "No path found between 9682689 and 7761328. Skipping...\n", + "No path found between 9682689 and 7761331. Skipping...\n", + "No path found between 9682690 and 7761328. Skipping...\n", + "No path found between 9682690 and 7761331. Skipping...\n", + "No path found between 7911171 and 7761328. Skipping...\n", + "No path found between 7911171 and 7761331. Skipping...\n", + "No path found between 7911172 and 7761331. Skipping...\n", + "No path found between 9682692 and 7761328. Skipping...\n", + "No path found between 9682692 and 7761331. Skipping...\n", + "No path found between 9682693 and 7761331. Skipping...\n", + "No path found between 9682693 and 7761328. Skipping...\n", + "No path found between 7911186 and 7761331. Skipping...\n", + "No path found between 3065155346 and 7761331. Skipping...\n", + "No path found between 3065155346 and 7761328. Skipping...\n", + "No path found between 7911190 and 7761328. Skipping...\n", + "No path found between 7911190 and 7761331. Skipping...\n", + "No path found between 7134999 and 7761328. Skipping...\n", + "No path found between 7134999 and 7761331. Skipping...\n", + "No path found between 5574670103 and 7761328. Skipping...\n", + "No path found between 5574670103 and 7761331. Skipping...\n", + "No path found between 540028708 and 3630145573. Skipping...\n", + "No path found between 540028708 and 6991772. Skipping...\n", + "No path found between 540028708 and 583487076. 
Skipping...\n", + "No path found between 540028708 and 10980571. Skipping...\n", + "No path found between 540028708 and 10925444. Skipping...\n", + "No path found between 540028708 and 7761310. Skipping...\n", + "No path found between 540028708 and 9006815. Skipping...\n", + "No path found between 540028708 and 366245. Skipping...\n", + "No path found between 540028708 and 8078845. Skipping...\n", + "No path found between 540028708 and 364975. Skipping...\n", + "No path found between 540028708 and 10382127. Skipping...\n", + "No path found between 540028708 and 366484. Skipping...\n", + "No path found between 540028708 and 10382227. Skipping...\n", + "No path found between 540028708 and 3065158694. Skipping...\n", + "No path found between 540028708 and 365087. Skipping...\n", + "No path found between 540028708 and 7193044. Skipping...\n", + "No path found between 540028708 and 10866203. Skipping...\n", + "No path found between 540028708 and 3630145581. Skipping...\n", + "No path found between 540028708 and 9006789. Skipping...\n", + "No path found between 540028708 and 5573028710. Skipping...\n", + "No path found between 540028708 and 2431481015. Skipping...\n", + "No path found between 540028708 and 7577946732. Skipping...\n", + "No path found between 540028708 and 9659658885. Skipping...\n", + "No path found between 540028708 and 7135044. Skipping...\n", + "No path found between 540028708 and 365446. Skipping...\n", + "No path found between 540028708 and 7761258. Skipping...\n", + "No path found between 540028708 and 11290430. Skipping...\n", + "No path found between 540028708 and 1685990560. Skipping...\n", + "No path found between 540028708 and 10925442. Skipping...\n", + "No path found between 540028708 and 392690157. Skipping...\n", + "No path found between 540028708 and 9795545. Skipping...\n", + "No path found between 540028708 and 366198. Skipping...\n", + "No path found between 540028708 and 7761052. 
Skipping...\n", + "No path found between 540028708 and 7193115. Skipping...\n", + "No path found between 540028708 and 532497213. Skipping...\n", + "No path found between 540028708 and 9465982. Skipping...\n", + "No path found between 540028708 and 1685990538. Skipping...\n", + "No path found between 540028708 and 10925539. Skipping...\n", + "No path found between 540028708 and 8784950. Skipping...\n", + "No path found between 540028708 and 10382061. Skipping...\n", + "No path found between 540028708 and 5683384300. Skipping...\n", + "No path found between 540028708 and 413910054. Skipping...\n", + "No path found between 540028708 and 7193077. Skipping...\n", + "No path found between 540028708 and 10382162. Skipping...\n", + "No path found between 540028708 and 7135037. Skipping...\n", + "No path found between 540028708 and 1326263755. Skipping...\n", + "No path found between 540028708 and 9484919. Skipping...\n", + "No path found between 540028708 and 365601. Skipping...\n", + "No path found between 540028708 and 10382246. Skipping...\n", + "No path found between 540028708 and 7193010. Skipping...\n", + "No path found between 540028708 and 10980568. Skipping...\n", + "No path found between 540028708 and 1708846. Skipping...\n", + "No path found between 540028708 and 9484916. Skipping...\n", + "No path found between 540028708 and 8300563. Skipping...\n", + "No path found between 540028708 and 8300639. Skipping...\n", + "No path found between 540028708 and 7761254. Skipping...\n", + "No path found between 540028708 and 7761267. Skipping...\n", + "No path found between 540028708 and 8117586120. Skipping...\n", + "No path found between 540028708 and 9484921. Skipping...\n", + "No path found between 540028708 and 9376036. Skipping...\n", + "No path found between 540028708 and 583474488. Skipping...\n", + "No path found between 540028708 and 3065157690. Skipping...\n", + "No path found between 540028708 and 9682461. 
Skipping...\n", + "No path found between 540028708 and 7193071. Skipping...\n", + "No path found between 540028708 and 10382160. Skipping...\n", + "No path found between 540028708 and 5239640. Skipping...\n", + "No path found between 540028708 and 364814. Skipping...\n", + "No path found between 540028708 and 9348246. Skipping...\n", + "No path found between 540028708 and 9682747. Skipping...\n", + "No path found between 540028708 and 7193011. Skipping...\n", + "No path found between 540028708 and 7761344. Skipping...\n", + "No path found between 540028708 and 9682459. Skipping...\n", + "No path found between 540028708 and 7761257. Skipping...\n", + "No path found between 540028708 and 2121251151. Skipping...\n", + "No path found between 540028708 and 8334445. Skipping...\n", + "No path found between 540028708 and 8784947. Skipping...\n", + "No path found between 540028708 and 5574194272. Skipping...\n", + "No path found between 540028708 and 9348295. Skipping...\n", + "No path found between 540028708 and 3630145575. Skipping...\n", + "No path found between 540028708 and 364963. Skipping...\n", + "No path found between 540028708 and 7760960. Skipping...\n", + "No path found between 540028708 and 4760747122. Skipping...\n", + "No path found between 540028708 and 366239. Skipping...\n", + "No path found between 540028708 and 7577946730. Skipping...\n", + "No path found between 540028708 and 7636462571. Skipping...\n", + "No path found between 540028708 and 365081. Skipping...\n", + "No path found between 540028708 and 7761211. Skipping...\n", + "No path found between 540028708 and 7761261. Skipping...\n", + "No path found between 540028708 and 225816169. Skipping...\n", + "No path found between 540028708 and 10214402. Skipping...\n", + "No path found between 540028708 and 7761263. Skipping...\n", + "No path found between 540028708 and 8300647. Skipping...\n", + "No path found between 540028708 and 10854963. 
Skipping...\n", + "No path found between 540028708 and 5585806360. Skipping...\n", + "No path found between 540028708 and 9348242. Skipping...\n", + "No path found between 540028708 and 9682444. Skipping...\n", + "No path found between 540028708 and 10980569. Skipping...\n", + "No path found between 540028708 and 9012918. Skipping...\n", + "No path found between 540028708 and 364995. Skipping...\n", + "No path found between 540028708 and 4378048314. Skipping...\n", + "No path found between 540028708 and 9484910. Skipping...\n", + "No path found between 540028708 and 364822. Skipping...\n", + "No path found between 540028708 and 10214450. Skipping...\n", + "No path found between 540028708 and 7761462. Skipping...\n", + "No path found between 540028708 and 7761274. Skipping...\n", + "No path found between 540028708 and 8545315565. Skipping...\n", + "No path found between 540028708 and 8334527. Skipping...\n", + "No path found between 540028708 and 7761306. Skipping...\n", + "No path found between 540028708 and 10382080. Skipping...\n", + "No path found between 540028708 and 7638526280. Skipping...\n", + "No path found between 540028708 and 7193113. Skipping...\n", + "No path found between 540028708 and 1309433558. Skipping...\n", + "No path found between 540028708 and 1375728592. Skipping...\n", + "No path found between 540028708 and 8300557. Skipping...\n", + "No path found between 540028708 and 2985883571. Skipping...\n", + "No path found between 540028708 and 5573028709. Skipping...\n", + "No path found between 540028708 and 10010830. Skipping...\n", + "No path found between 540028708 and 366482. Skipping...\n", + "No path found between 540028708 and 1256985889. Skipping...\n", + "No path found between 540028708 and 10925433. Skipping...\n", + "No path found between 540028708 and 697481080. Skipping...\n", + "No path found between 540028708 and 883132505. Skipping...\n", + "No path found between 540028708 and 7761237. 
Skipping...\n", + "No path found between 540028708 and 7135051. Skipping...\n", + "No path found between 540028708 and 3068135939. Skipping...\n", + "No path found between 540028708 and 5683384259. Skipping...\n", + "No path found between 540028708 and 10382041. Skipping...\n", + "No path found between 540028708 and 9484774. Skipping...\n", + "No path found between 540028708 and 1342125559. Skipping...\n", + "No path found between 540028708 and 7135049. Skipping...\n", + "No path found between 540028708 and 2431481029. Skipping...\n", + "No path found between 540028708 and 8784904. Skipping...\n", + "No path found between 540028708 and 10980629. Skipping...\n", + "No path found between 540028708 and 3068464115. Skipping...\n", + "No path found between 540028708 and 7761198. Skipping...\n", + "No path found between 540028708 and 7761248. Skipping...\n", + "No path found between 540028708 and 7761244. Skipping...\n", + "No path found between 540028708 and 10010621. Skipping...\n", + "No path found between 540028708 and 9376034. Skipping...\n", + "No path found between 540028708 and 3064952812. Skipping...\n", + "No path found between 540028708 and 366237. Skipping...\n", + "No path found between 540028708 and 3065580105. Skipping...\n", + "No path found between 540028708 and 10810088. Skipping...\n", + "No path found between 540028708 and 392690268. Skipping...\n", + "No path found between 540028708 and 10382065. Skipping...\n", + "No path found between 540028708 and 2827743266. Skipping...\n", + "No path found between 540028708 and 7761315. Skipping...\n", + "No path found between 540028708 and 10382088. Skipping...\n", + "No path found between 540028708 and 11703558140. Skipping...\n", + "No path found between 540028708 and 8784945. Skipping...\n", + "No path found between 540028708 and 7193082. Skipping...\n", + "No path found between 540028708 and 10382062. Skipping...\n", + "No path found between 540028708 and 8091823. 
Skipping...\n", + "No path found between 540028708 and 366255. Skipping...\n", + "No path found between 540028708 and 10382166. Skipping...\n", + "No path found between 540028708 and 10382097. Skipping...\n", + "No path found between 540028708 and 7638526281. Skipping...\n", + "No path found between 540028708 and 5239642. Skipping...\n", + "No path found between 540028708 and 364982. Skipping...\n", + "No path found between 540028708 and 1685990564. Skipping...\n", + "No path found between 540028708 and 9682689. Skipping...\n", + "No path found between 540028708 and 9659658886. Skipping...\n", + "No path found between 540028708 and 10980596. Skipping...\n", + "No path found between 540028708 and 10980613. Skipping...\n", + "No path found between 540028708 and 9682457. Skipping...\n", + "No path found between 540028708 and 2015910328. Skipping...\n", + "No path found between 540028708 and 3065156829. Skipping...\n", + "No path found between 540028708 and 7193005. Skipping...\n", + "No path found between 540028708 and 9795469. Skipping...\n", + "No path found between 540028708 and 7761006. Skipping...\n", + "No path found between 540028708 and 1708814. Skipping...\n", + "No path found between 540028708 and 9682560. Skipping...\n", + "No path found between 540028708 and 366466. Skipping...\n", + "No path found between 540028708 and 7760961. Skipping...\n", + "No path found between 540028708 and 9006736. Skipping...\n", + "No path found between 540028708 and 365418. Skipping...\n", + "No path found between 540028708 and 7134940. Skipping...\n", + "No path found between 540028708 and 365553. Skipping...\n", + "No path found between 540028708 and 10980615. Skipping...\n", + "No path found between 540028708 and 8300515. Skipping...\n", + "No path found between 540028708 and 364819. Skipping...\n", + "No path found between 540028708 and 365552. Skipping...\n", + "No path found between 540028708 and 2773541209. 
Skipping...\n", + "No path found between 540028708 and 869010256. Skipping...\n", + "No path found between 540028708 and 7761460. Skipping...\n", + "No path found between 540028708 and 3455331920. Skipping...\n", + "No path found between 540028708 and 10980576. Skipping...\n", + "No path found between 540028708 and 9006745. Skipping...\n", + "No path found between 540028708 and 7761350. Skipping...\n", + "No path found between 540028708 and 364950. Skipping...\n", + "No path found between 540028708 and 8300589. Skipping...\n", + "No path found between 540028708 and 7379194. Skipping...\n", + "No path found between 540028708 and 366249. Skipping...\n", + "No path found between 540028708 and 9484826. Skipping...\n", + "No path found between 540028708 and 9465987. Skipping...\n", + "No path found between 540028708 and 9348241. Skipping...\n", + "No path found between 540028708 and 7155044293. Skipping...\n", + "No path found between 540028708 and 7135039. Skipping...\n", + "No path found between 540028708 and 366268. Skipping...\n", + "No path found between 540028708 and 10382228. Skipping...\n", + "No path found between 540028708 and 583243035. Skipping...\n", + "No path found between 540028708 and 8447477702. Skipping...\n", + "No path found between 540028708 and 1708815. Skipping...\n", + "No path found between 540028708 and 3065154579. Skipping...\n", + "No path found between 540028708 and 364811. Skipping...\n", + "No path found between 540028708 and 9348258. Skipping...\n", + "No path found between 540028708 and 3065155346. Skipping...\n", + "No path found between 540028708 and 7761000. Skipping...\n", + "No path found between 540028708 and 7761447. Skipping...\n", + "No path found between 540028708 and 10925432. Skipping...\n", + "No path found between 540028708 and 10382177. Skipping...\n", + "No path found between 540028708 and 2015910583. Skipping...\n", + "No path found between 540028708 and 7761351. 
Skipping...\n", + "No path found between 540028708 and 10980578. Skipping...\n", + "No path found between 540028708 and 5574670103. Skipping...\n", + "No path found between 540028708 and 10382121. Skipping...\n", + "No path found between 540028708 and 2431480994. Skipping...\n", + "No path found between 540028708 and 364919. Skipping...\n", + "No path found between 540028708 and 3064952754. Skipping...\n", + "No path found between 540028708 and 5574194267. Skipping...\n", + "No path found between 540028708 and 364880. Skipping...\n", + "No path found between 540028708 and 10214449. Skipping...\n", + "No path found between 540028708 and 540031740. Skipping...\n", + "No path found between 540028708 and 1685990555. Skipping...\n", + "No path found between 540028708 and 10214405. Skipping...\n", + "No path found between 540028708 and 7761206. Skipping...\n", + "No path found between 540028708 and 10382106. Skipping...\n", + "No path found between 540028708 and 7193110. Skipping...\n", + "No path found between 540028708 and 10382176. Skipping...\n", + "No path found between 540028708 and 4378048320. Skipping...\n", + "No path found between 540028708 and 7135031. Skipping...\n", + "No path found between 540028708 and 8335801015. Skipping...\n", + "No path found between 540028708 and 7661470431. Skipping...\n", + "No path found between 540028708 and 9465990. Skipping...\n", + "No path found between 540028708 and 7761328. Skipping...\n", + "No path found between 540028708 and 7911101. Skipping...\n", + "No path found between 540028708 and 10382178. Skipping...\n", + "No path found between 540028708 and 2619903057. Skipping...\n", + "No path found between 540028708 and 8300540. Skipping...\n", + "No path found between 540028708 and 365617. Skipping...\n", + "No path found between 540028708 and 1808857. Skipping...\n", + "No path found between 540028708 and 8837239148. Skipping...\n", + "No path found between 540028708 and 5574194278. 
Skipping...\n", + "No path found between 540028708 and 26120444. Skipping...\n", + "No path found between 540028708 and 2300548965. Skipping...\n", + "No path found between 540028708 and 7761229. Skipping...\n", + "No path found between 540028708 and 7761298. Skipping...\n", + "No path found between 540028708 and 366247. Skipping...\n", + "No path found between 540028708 and 7761272. Skipping...\n", + "No path found between 540028708 and 7761262. Skipping...\n", + "No path found between 540028708 and 7761268. Skipping...\n", + "No path found between 540028708 and 364958. Skipping...\n", + "No path found between 540028708 and 7193116. Skipping...\n", + "No path found between 540028708 and 8300569. Skipping...\n", + "No path found between 540028708 and 7379187. Skipping...\n", + "No path found between 540028708 and 9006798. Skipping...\n", + "No path found between 540028708 and 392690521. Skipping...\n", + "No path found between 540028708 and 364848. Skipping...\n", + "No path found between 540028708 and 1345248899. Skipping...\n", + "No path found between 540028708 and 5239631. Skipping...\n", + "No path found between 540028708 and 8837239137. Skipping...\n", + "No path found between 540028708 and 7911172. Skipping...\n", + "No path found between 540028708 and 4337327387. Skipping...\n", + "No path found between 540028708 and 225816087. Skipping...\n", + "No path found between 540028708 and 10382032. Skipping...\n", + "No path found between 540028708 and 7193106. Skipping...\n", + "No path found between 540028708 and 1708811. Skipping...\n", + "No path found between 540028708 and 319946667. Skipping...\n", + "No path found between 540028708 and 10382063. Skipping...\n", + "No path found between 540028708 and 8334451. Skipping...\n", + "No path found between 540028708 and 10980563. Skipping...\n", + "No path found between 540028708 and 9675088807. Skipping...\n", + "No path found between 540028708 and 9095145. 
Skipping...\n", + "No path found between 540028708 and 366262. Skipping...\n", + "No path found between 540028708 and 3065582227. Skipping...\n", + "No path found between 540028708 and 2839133463. Skipping...\n", + "No path found between 540028708 and 8837239127. Skipping...\n", + "No path found between 540028708 and 10382161. Skipping...\n", + "No path found between 540028708 and 7760997. Skipping...\n", + "No path found between 540028708 and 10214445. Skipping...\n", + "No path found between 540028708 and 5239635. Skipping...\n", + "No path found between 540028708 and 7761292. Skipping...\n", + "No path found between 540028708 and 10382079. Skipping...\n", + "No path found between 540028708 and 7135040. Skipping...\n", + "No path found between 540028708 and 7760976. Skipping...\n", + "No path found between 540028708 and 8300625. Skipping...\n", + "No path found between 540028708 and 8117585116. Skipping...\n", + "No path found between 540028708 and 32126253. Skipping...\n", + "No path found between 540028708 and 7134999. Skipping...\n", + "No path found between 540028708 and 7193104. Skipping...\n", + "No path found between 540028708 and 10980597. Skipping...\n", + "No path found between 540028708 and 10010638. Skipping...\n", + "No path found between 540028708 and 9484920. Skipping...\n", + "No path found between 540028708 and 7135035. Skipping...\n", + "No path found between 540028708 and 4311659315. Skipping...\n", + "No path found between 540028708 and 7761010. Skipping...\n", + "No path found between 540028708 and 9006738. Skipping...\n", + "No path found between 540028708 and 10382118. Skipping...\n", + "No path found between 540028708 and 8091815. Skipping...\n", + "No path found between 540028708 and 5574194269. Skipping...\n", + "No path found between 540028708 and 8785049. Skipping...\n", + "No path found between 540028708 and 7761349. Skipping...\n", + "No path found between 540028708 and 7761054. 
Skipping...\n", + "No path found between 540028708 and 2831569625. Skipping...\n", + "No path found between 540028708 and 10980603. Skipping...\n", + "No path found between 540028708 and 10382064. Skipping...\n", + "No path found between 540028708 and 7761062. Skipping...\n", + "No path found between 540028708 and 9682690. Skipping...\n", + "No path found between 540028708 and 7761218. Skipping...\n", + "No path found between 540028708 and 7761331. Skipping...\n", + "No path found between 540028708 and 1268702915. Skipping...\n", + "No path found between 540028708 and 841598831. Skipping...\n", + "No path found between 540028708 and 392690520. Skipping...\n", + "No path found between 540028708 and 10980640. Skipping...\n", + "No path found between 540028708 and 7193073. Skipping...\n", + "No path found between 540028708 and 9682458. Skipping...\n", + "No path found between 540028708 and 8335801016. Skipping...\n", + "No path found between 540028708 and 9682686. Skipping...\n", + "No path found between 540028708 and 10382165. Skipping...\n", + "No path found between 540028708 and 7761239. Skipping...\n", + "No path found between 540028708 and 8784903. Skipping...\n", + "No path found between 540028708 and 1708829. Skipping...\n", + "No path found between 540028708 and 4336198950. Skipping...\n", + "No path found between 540028708 and 225816042. Skipping...\n", + "No path found between 540028708 and 10382173. Skipping...\n", + "No path found between 540028708 and 582823188. Skipping...\n", + "No path found between 540028708 and 9682688. Skipping...\n", + "No path found between 540028708 and 10382126. Skipping...\n", + "No path found between 540028708 and 5239629. Skipping...\n", + "No path found between 540028708 and 403887715. Skipping...\n", + "No path found between 540028708 and 4136572206. Skipping...\n", + "No path found between 540028708 and 10214447. Skipping...\n", + "No path found between 540028708 and 7193012. 
Skipping...\n", + "No path found between 540028708 and 1708832. Skipping...\n", + "No path found between 540028708 and 1708882. Skipping...\n", + "No path found between 540028708 and 4763054359. Skipping...\n", + "No path found between 540028708 and 10382148. Skipping...\n", + "No path found between 540028708 and 364828. Skipping...\n", + "No path found between 540028708 and 2434915678. Skipping...\n", + "No path found between 540028708 and 10810082. Skipping...\n", + "No path found between 540028708 and 364839. Skipping...\n", + "No path found between 540028708 and 10929256. Skipping...\n", + "No path found between 540028708 and 8784943. Skipping...\n", + "No path found between 540028708 and 364914. Skipping...\n", + "No path found between 540028708 and 10012786. Skipping...\n", + "No path found between 540028708 and 7761457. Skipping...\n", + "No path found between 540028708 and 364910. Skipping...\n", + "No path found between 540028708 and 366264. Skipping...\n", + "No path found between 540028708 and 1708856. Skipping...\n", + "No path found between 540028708 and 10810076. Skipping...\n", + "No path found between 540028708 and 7761119. Skipping...\n", + "No path found between 540028708 and 7761240. Skipping...\n", + "No path found between 540028708 and 7761450. Skipping...\n", + "No path found between 540028708 and 366267. Skipping...\n", + "No path found between 540028708 and 9006790. Skipping...\n", + "No path found between 540028708 and 365451. Skipping...\n", + "No path found between 540028708 and 10855008. Skipping...\n", + "No path found between 540028708 and 10810079. Skipping...\n", + "No path found between 540028708 and 10929257. Skipping...\n", + "No path found between 540028708 and 9006762. Skipping...\n", + "No path found between 540028708 and 8328537173. Skipping...\n", + "No path found between 540028708 and 10980564. Skipping...\n", + "No path found between 540028708 and 1708827. Skipping...\n", + "No path found between 540028708 and 403796569. 
Skipping...\n", + "No path found between 540028708 and 364815. Skipping...\n", + "No path found between 540028708 and 1375732743. Skipping...\n", + "No path found between 540028708 and 7135048. Skipping...\n", + "No path found between 540028708 and 366277. Skipping...\n", + "No path found between 540028708 and 1297723547. Skipping...\n", + "No path found between 540028708 and 8091839. Skipping...\n", + "No path found between 540028708 and 366228. Skipping...\n", + "No path found between 540028708 and 10925438. Skipping...\n", + "No path found between 540028708 and 9795544. Skipping...\n", + "No path found between 540028708 and 364946. Skipping...\n", + "No path found between 540028708 and 366258. Skipping...\n", + "No path found between 540028708 and 9095144. Skipping...\n", + "No path found between 540028708 and 7761282. Skipping...\n", + "No path found between 540028708 and 9484917. Skipping...\n", + "No path found between 540028708 and 7761280. Skipping...\n", + "No path found between 540028708 and 9012920. Skipping...\n", + "No path found between 540028708 and 4136572207. Skipping...\n", + "No path found between 540028708 and 4136572210. Skipping...\n", + "No path found between 540028708 and 9095153. Skipping...\n", + "No path found between 540028708 and 8300561. Skipping...\n", + "No path found between 540028708 and 8300626. Skipping...\n", + "No path found between 540028708 and 10980566. Skipping...\n", + "No path found between 540028708 and 10382070. Skipping...\n", + "No path found between 540028708 and 7911186. Skipping...\n", + "No path found between 540028708 and 8784998. Skipping...\n", + "No path found between 540028708 and 9682460. Skipping...\n", + "No path found between 540028708 and 403796568. Skipping...\n", + "No path found between 540028708 and 3068468634. Skipping...\n", + "No path found between 540028708 and 7761260. Skipping...\n", + "No path found between 540028708 and 7135043. 
Skipping...\n", + "No path found between 540028708 and 392690370. Skipping...\n", + "No path found between 540028708 and 364927. Skipping...\n", + "No path found between 540028708 and 7911276. Skipping...\n", + "No path found between 540028708 and 9348240. Skipping...\n", + "No path found between 540028708 and 7134941. Skipping...\n", + "No path found between 540028708 and 4311659306. Skipping...\n", + "No path found between 540028708 and 364884. Skipping...\n", + "No path found between 540028708 and 9484796. Skipping...\n", + "No path found between 540028708 and 364851. Skipping...\n", + "No path found between 540028708 and 7911098. Skipping...\n", + "No path found between 540028708 and 1685990563. Skipping...\n", + "No path found between 540028708 and 7193083. Skipping...\n", + "No path found between 540028708 and 2831569613. Skipping...\n", + "No path found between 540028708 and 6991775. Skipping...\n", + "No path found between 540028708 and 5239639. Skipping...\n", + "No path found between 540028708 and 3065581208. Skipping...\n", + "No path found between 540028708 and 225816237. Skipping...\n", + "No path found between 540028708 and 9006793. Skipping...\n", + "No path found between 540028708 and 10980574. Skipping...\n", + "No path found between 540028708 and 8328537170. Skipping...\n", + "No path found between 540028708 and 5239632. Skipping...\n", + "No path found between 540028708 and 7379176. Skipping...\n", + "No path found between 540028708 and 7761289. Skipping...\n", + "No path found between 540028708 and 9006764. Skipping...\n", + "No path found between 540028708 and 7761061. Skipping...\n", + "No path found between 540028708 and 10925430. Skipping...\n", + "No path found between 540028708 and 225816133. Skipping...\n", + "No path found between 540028708 and 366254. Skipping...\n", + "No path found between 540028708 and 10382084. Skipping...\n", + "No path found between 540028708 and 9095200. 
Skipping...\n", + "No path found between 540028708 and 7761176. Skipping...\n", + "No path found between 540028708 and 583958160. Skipping...\n", + "No path found between 540028708 and 9012908. Skipping...\n", + "No path found between 540028708 and 9095222. Skipping...\n", + "No path found between 540028708 and 7193004. Skipping...\n", + "No path found between 540028708 and 8837239133. Skipping...\n", + "No path found between 540028708 and 7379192. Skipping...\n", + "No path found between 540028708 and 601045205. Skipping...\n", + "No path found between 540028708 and 7761283. Skipping...\n", + "No path found between 540028708 and 8784911. Skipping...\n", + "No path found between 540028708 and 9006833. Skipping...\n", + "No path found between 540028708 and 2839133464. Skipping...\n", + "No path found between 540028708 and 10214448. Skipping...\n", + "No path found between 540028708 and 392690407. Skipping...\n", + "No path found between 540028708 and 583510634. Skipping...\n", + "No path found between 540028708 and 8086357. Skipping...\n", + "No path found between 540028708 and 7760983. Skipping...\n", + "No path found between 540028708 and 8300562. Skipping...\n", + "No path found between 540028708 and 364972. Skipping...\n", + "No path found between 540028708 and 11703558141. Skipping...\n", + "No path found between 540028708 and 10382089. Skipping...\n", + "No path found between 540028708 and 10382125. Skipping...\n", + "No path found between 540028708 and 366201. Skipping...\n", + "No path found between 540028708 and 7193079. Skipping...\n", + "No path found between 540028708 and 5574194268. Skipping...\n", + "No path found between 540028708 and 10382039. Skipping...\n", + "No path found between 540028708 and 9484810. Skipping...\n", + "No path found between 540028708 and 7761278. Skipping...\n", + "No path found between 540028708 and 2985883608. Skipping...\n", + "No path found between 540028708 and 10382164. 
Skipping...\n", + "No path found between 540028708 and 2827743265. Skipping...\n", + "No path found between 540028708 and 4311659314. Skipping...\n", + "No path found between 540028708 and 1708802. Skipping...\n", + "No path found between 540028708 and 10855009. Skipping...\n", + "No path found between 540028708 and 4136572202. Skipping...\n", + "No path found between 540028708 and 8086196. Skipping...\n", + "No path found between 540028708 and 8837239142. Skipping...\n", + "No path found between 540028708 and 1708867. Skipping...\n", + "No path found between 540028708 and 8837239138. Skipping...\n", + "No path found between 540028708 and 8300995112. Skipping...\n", + "No path found between 540028708 and 1708866. Skipping...\n", + "No path found between 540028708 and 7761323. Skipping...\n", + "No path found between 540028708 and 9831958. Skipping...\n", + "No path found between 540028708 and 9095311. Skipping...\n", + "No path found between 540028708 and 10382038. Skipping...\n", + "No path found between 540028708 and 10810087. Skipping...\n", + "No path found between 540028708 and 7661470430. Skipping...\n", + "No path found between 540028708 and 7761304. Skipping...\n", + "No path found between 540028708 and 7761345. Skipping...\n", + "No path found between 540028708 and 9348244. Skipping...\n", + "No path found between 540028708 and 7379191. Skipping...\n", + "No path found between 540028708 and 366468. Skipping...\n", + "No path found between 540028708 and 365571. Skipping...\n", + "No path found between 540028708 and 366215. Skipping...\n", + "No path found between 540028708 and 8091817. Skipping...\n", + "No path found between 540028708 and 225816230. Skipping...\n", + "No path found between 540028708 and 8300576. Skipping...\n", + "No path found between 540028708 and 3065155088. Skipping...\n", + "No path found between 540028708 and 7379190. Skipping...\n", + "No path found between 540028708 and 10854962. 
Skipping...\n", + "No path found between 540028708 and 8087110. Skipping...\n", + "No path found between 540028708 and 5239633. Skipping...\n", + "No path found between 540028708 and 365590. Skipping...\n", + "No path found between 540028708 and 7193075. Skipping...\n", + "No path found between 540028708 and 11290426. Skipping...\n", + "No path found between 540028708 and 4760747125. Skipping...\n", + "No path found between 540028708 and 9012907. Skipping...\n", + "No path found between 540028708 and 10382175. Skipping...\n", + "No path found between 540028708 and 5683384269. Skipping...\n", + "No path found between 540028708 and 8300592. Skipping...\n", + "No path found between 540028708 and 10382040. Skipping...\n", + "No path found between 540028708 and 9682692. Skipping...\n", + "No path found between 540028708 and 7193076. Skipping...\n", + "No path found between 540028708 and 583474475. Skipping...\n", + "No path found between 540028708 and 9831959. Skipping...\n", + "No path found between 540028708 and 364977. Skipping...\n", + "No path found between 540028708 and 364908. Skipping...\n", + "No path found between 540028708 and 364983. Skipping...\n", + "No path found between 540028708 and 7760962. Skipping...\n", + "No path found between 540028708 and 8087109. Skipping...\n", + "No path found between 540028708 and 8087145. Skipping...\n", + "No path found between 540028708 and 10925428. Skipping...\n", + "No path found between 540028708 and 366202. Skipping...\n", + "No path found between 540028708 and 366257. Skipping...\n", + "No path found between 540028708 and 9795477. Skipping...\n", + "No path found between 540028708 and 9442870419. Skipping...\n", + "No path found between 540028708 and 7379189. Skipping...\n", + "No path found between 540028708 and 9795541. Skipping...\n", + "No path found between 540028708 and 366472. Skipping...\n", + "No path found between 540028708 and 7761266. Skipping...\n", + "No path found between 540028708 and 2431481032. 
Skipping...\n", + "No path found between 540028708 and 1708795. Skipping...\n", + "No path found between 540028708 and 7761285. Skipping...\n", + "No path found between 540028708 and 365586. Skipping...\n", + "No path found between 540028708 and 7761489. Skipping...\n", + "No path found between 540028708 and 7379188. Skipping...\n", + "No path found between 540028708 and 5239644. Skipping...\n", + "No path found between 540028708 and 9484918. Skipping...\n", + "No path found between 540028708 and 9682592. Skipping...\n", + "No path found between 540028708 and 2773541234. Skipping...\n", + "No path found between 540028708 and 10382233. Skipping...\n", + "No path found between 540028708 and 1782387239. Skipping...\n", + "No path found between 540028708 and 151215655. Skipping...\n", + "No path found between 540028708 and 1334612513. Skipping...\n", + "No path found between 540028708 and 7761312. Skipping...\n", + "No path found between 540028708 and 7761324. Skipping...\n", + "No path found between 7135031 and 7761331. Skipping...\n", + "No path found between 7135031 and 7761328. Skipping...\n", + "No path found between 11290426 and 7761331. Skipping...\n", + "No path found between 11290426 and 7761328. Skipping...\n", + "No path found between 7135035 and 7761331. Skipping...\n", + "No path found between 9682747 and 7761331. Skipping...\n", + "No path found between 9682747 and 7761328. Skipping...\n", + "No path found between 7135037 and 7761331. Skipping...\n", + "No path found between 7135037 and 7761328. Skipping...\n", + "No path found between 11290430 and 7761328. Skipping...\n", + "No path found between 11290430 and 7761331. Skipping...\n", + "No path found between 7135039 and 7761328. Skipping...\n", + "No path found between 7135039 and 7761331. Skipping...\n", + "No path found between 7135040 and 7761331. Skipping...\n", + "No path found between 7135040 and 7761328. Skipping...\n", + "No path found between 4378048314 and 7761331. 
Skipping...\n", + "No path found between 4378048320 and 7761328. Skipping...\n", + "No path found between 4378048320 and 7761331. Skipping...\n", + "No path found between 7135043 and 7761328. Skipping...\n", + "No path found between 7135043 and 7761331. Skipping...\n", + "No path found between 7135044 and 7761328. Skipping...\n", + "No path found between 7135048 and 7761328. Skipping...\n", + "No path found between 7135048 and 7761331. Skipping...\n", + "No path found between 7135049 and 7761328. Skipping...\n", + "No path found between 7135049 and 7761331. Skipping...\n", + "No path found between 7135051 and 7761331. Skipping...\n", + "No path found between 7135051 and 7761328. Skipping...\n", + "No path found between 2300548965 and 7761328. Skipping...\n", + "No path found between 2300548965 and 7761331. Skipping...\n", + "No path found between 7911276 and 7761328. Skipping...\n", + "No path found between 7911276 and 7761331. Skipping...\n", + "No path found between 841598831 and 7761331. Skipping...\n", + "No path found between 841598831 and 7761328. Skipping...\n", + "No path found between 697481080 and 7761328. Skipping...\n", + "No path found between 697481080 and 7761331. Skipping...\n", + "No path found between 366461 and 7761328. Skipping...\n", + "No path found between 366461 and 7761331. Skipping...\n", + "No path found between 366466 and 7761331. Skipping...\n", + "No path found between 366466 and 7761328. Skipping...\n", + "No path found between 366472 and 7761331. Skipping...\n", + "No path found between 9795469 and 7761331. Skipping...\n", + "No path found between 9795469 and 7761328. Skipping...\n", + "No path found between 14378897 and 7761331. Skipping...\n", + "No path found between 14378897 and 7761328. Skipping...\n", + "No path found between 366482 and 7761331. Skipping...\n", + "No path found between 366482 and 7761328. Skipping...\n", + "No path found between 366484 and 7761331. Skipping...\n", + "No path found between 366484 and 7761328. 
Skipping...\n", + "No path found between 9795477 and 7761331. Skipping...\n", + "No path found between 9795477 and 7761328. Skipping...\n", + "No path found between 6991772 and 7761331. Skipping...\n", + "No path found between 6991775 and 7761328. Skipping...\n", + "No path found between 6991780 and 7761328. Skipping...\n", + "No path found between 6991780 and 7761331. Skipping...\n", + "No path found between 319946667 and 7761328. Skipping...\n", + "No path found between 319946667 and 7761331. Skipping...\n", + "No path found between 319946669 and 7761331. Skipping...\n", + "No path found between 3064952754 and 7761331. Skipping...\n", + "No path found between 3064952754 and 7761328. Skipping...\n", + "No path found between 2985883571 and 7761328. Skipping...\n", + "No path found between 5683384259 and 7761328. Skipping...\n", + "No path found between 7155044293 and 7761328. Skipping...\n", + "No path found between 7155044293 and 7761331. Skipping...\n", + "No path found between 8447477702 and 7761331. Skipping...\n", + "No path found between 8447477702 and 7761328. Skipping...\n", + "No path found between 5683384269 and 7761331. Skipping...\n", + "No path found between 5683384269 and 7761328. Skipping...\n", + "No path found between 5683384271 and 7761331. Skipping...\n", + "No path found between 5683384271 and 7761328. Skipping...\n", + "No path found between 9795541 and 7761328. Skipping...\n", + "No path found between 9795541 and 7761331. Skipping...\n", + "No path found between 9795542 and 7761328. Skipping...\n", + "No path found between 9795542 and 7761331. Skipping...\n", + "No path found between 9795543 and 7761331. Skipping...\n", + "No path found between 9795543 and 7761328. Skipping...\n", + "No path found between 9795544 and 7761328. Skipping...\n", + "No path found between 9795544 and 7761331. Skipping...\n", + "No path found between 9795545 and 7761328. Skipping...\n", + "No path found between 9795545 and 7761331. 
Skipping...\n", + "No path found between 2985883608 and 7761328. Skipping...\n", + "No path found between 8300515 and 7761331. Skipping...\n", + "No path found between 8300515 and 7761328. Skipping...\n", + "No path found between 5683384294 and 7761331. Skipping...\n", + "No path found between 5683384294 and 7761328. Skipping...\n", + "No path found between 9095144 and 7761331. Skipping...\n", + "No path found between 9095144 and 7761328. Skipping...\n", + "No path found between 9095145 and 7761331. Skipping...\n", + "No path found between 9095145 and 7761328. Skipping...\n", + "No path found between 3064952812 and 7761331. Skipping...\n", + "No path found between 3064952812 and 7761328. Skipping...\n", + "No path found between 5683384300 and 7761331. Skipping...\n", + "No path found between 5683384300 and 7761328. Skipping...\n", + "No path found between 9095153 and 7761328. Skipping...\n", + "No path found between 9095153 and 7761331. Skipping...\n", + "No path found between 11703558139 and 7761331. Skipping...\n", + "No path found between 11703558139 and 7761328. Skipping...\n", + "No path found between 11703558140 and 7761331. Skipping...\n", + "No path found between 11703558140 and 7761328. Skipping...\n", + "No path found between 11703558141 and 7761328. Skipping...\n", + "No path found between 11703558141 and 7761331. Skipping...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\IPython\\core\\interactiveshell.py:3445: FutureWarning: The `op` parameter is deprecated and will be removed in a future release. 
Please use the `predicate` parameter instead.\n", + " if await self.run_code(code, result, async_=asy):\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. 
You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. 
You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. 
You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. 
You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlYAAAHFCAYAAAAwv7dvAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABeOUlEQVR4nO3de1xUdf4/8NcMl2FAGEGEYQARzTtIpZniBa9oCXbZLuuFNMvvbolmamuX365YraiV393NrO1mtpX03TXcLkqa10xQJEnwXiqggCjCcB9g5vP7Azk6cnHAgTMDr+fjMV+dc95z5j3Td5tXn/M5n6MQQggQERER0W1Tyt0AERERUUfBYEVERERkJQxWRERERFbCYEVERERkJQxWRERERFbCYEVERERkJQxWRERERFbCYEVERERkJQxWRERERFbCYEVE7eKTTz6BQqFo8rFnzx6ptmfPnpgzZ470fM+ePVAoFPjPf/7T/o1bIC4uDgqFAkqlEmfPnm2wv7y8HB4eHlAoFGafqyVWrlyJLVu2NNhe/70ePny4VcdtibFjx2Ls2LFt/j5E9sxR7gaIqHPZsGED+vfv32D7wIEDZejGurp06YINGzbgtddeM9v+73//GzU1NXBycmr1sVeuXIlHHnkEDz744G12SURticGKiNpVSEgIhg4dKncbbeLxxx/Hxo0bsWLFCiiV108IfPTRR3jooYfw9ddfy9gdEbUHngokIrtRVVWFxYsXQ6vVQq1WIyIiAkeOHGlQ9/XXX2PEiBFwdXWFu7s7Jk2ahOTkZGn/sWPHoFAo8O9//1valpaWBoVCgUGDBpkda9q0aRgyZIhF/c2dOxc5OTnYsWOHtO306dPYv38/5s6d2+hrSkpKsHTpUgQHB8PZ2Rn+/v5YtGgRysvLpRqFQoHy8nJs3LhROnV68ym50tJSPPPMM/D29ka3bt3w8MMPIzc316zGZDJhzZo16N+/P1QqFXx8fPDEE0/gwoULZnVCCKxZswZBQUFwcXHB3XffjW3btln0HRB1dgxWRNSujEYjamtrzR5Go9Gi17788ss4e/YsPvzwQ3z44YfIzc3F2LFjzeY1ffHFF3jggQfg4eGBTZs24aOPPkJRURHGjh2L/fv3AwAGDRoEPz8//PDDD9LrfvjhB6jVahw/flwKJLW1tdi7dy8mTpxoUX99+vTB6NGj8fHHH0vbPv74Y/Ts2RMTJkxoUF9RUYGIiAhs3LgRCxcuxLZt27Bs2TJ88sknmDZtGoQQAIDk5GSo1Wrcf//9SE5ORnJyMtavX292rKeffhpOTk744osvsGbNGuzZswezZs0yq3nmmWewbNkyTJo0CV9//TVee+01JCUlITw8HFeuXJHqVqxYIdVt2bIFzzzzDObNm4dTp05Z9D0QdWqCiKgdbNiwQQBo9OHg4GBWGxQUJGbPni093717twAg7r77bmEymaTt58+fF05OTuLpp58WQghhNBqFTqcToaGhwmg0SnWlpaXCx8dHhIeHS9tmzZolevXqJT2fOHGimDdvnvD09BQbN24UQgjx008/CQBi+/btzX625cuXCwDi8uXLYsOGDUKlUonCwkJRW1sr/Pz8RFxcnBBCCDc3N7PPFR8fL5RKpUhNTTU73n/+8x8BQGzdulXadvNrb/5en332WbPta9asEQBEXl6eEEKIEydONFp38OBBAUC8/PLLQgghioqKhIuLi3jooYfM6uq/i4iIiGa/C6LOjiNWRNSuPv30U6Smppo9Dh48aNFrZ8yYAYVCIT0PCgpCeHg4du/eDQA4deoUcnNzERMTYzbHqUuXLvjd736HlJQUVFRUAAAmTJiAs2fP4ty5c6iqqsL+/fsxZ
coUjBs3TjqV98MPP0ClUmHUqFEWf75HH30Uzs7O+Pzzz7F161bk5+c3eSXgt99+i5CQENx5551mI3iTJ09ucKXkrUybNs3s+eDBgwEAWVlZACB9Rzf3MmzYMAwYMAA7d+4EUDc6VlVVhZkzZ5rVhYeHIygoyOJ+iDorTl4nonY1YMCAVk9e12q1jW775ZdfAACFhYUAAD8/vwZ1Op0OJpMJRUVFcHV1lU7v/fDDDwgODkZNTQ3Gjx+PS5cuSVf1/fDDDxg5ciTUarXFPbq5ueHxxx/Hxx9/jKCgIEycOLHJQHLp0iX8+uuvTV4teOPpuVvp1q2b2XOVSgUAqKysBHDr76Y+gNXXNfVdE1HzGKyIyG7k5+c3uq0+VNT/mZeX16AuNzcXSqUSnp6eAICAgAD07dsXP/zwA3r27ImhQ4eia9eumDBhAp599lkcPHgQKSkpWLFiRYv7nDt3Lj788EMcPXoUn3/+eZN13t7eUKvVZnOybt5vLTd+NwEBAWb7cnNzpfeqr2vqu+7Zs6fVeiLqiHgqkIjsxqZNm6QJ3UDdaa4DBw5IV8j169cP/v7++OKLL8zqysvLsXnzZulKwXoTJ07Erl27sGPHDkyaNAkA0LdvX/To0QN/+ctfUFNTY/HE9RuNGDECc+fOxUMPPYSHHnqoybqoqCj89ttv6NatG4YOHdrgcWOIUalU0uhTa4wfPx4A8Nlnn5ltT01NxYkTJ6TJ9cOHD4eLi0uDQHjgwAFpVIuImsYRKyJqV5mZmaitrW2wvXfv3ujevXuzry0oKMBDDz2EefPmQa/XY/ny5XBxccFLL70EAFAqlVizZg1mzpyJqKgo/OEPf4DBYMAbb7yB4uJirFq1yux4EyZMwPr163HlyhX87W9/M9u+YcMGeHp6WrzUws0++uijW9YsWrQImzdvxpgxY/D8889j8ODBMJlMyM7Oxvbt27FkyRLce++9AIDQ0FDs2bMH33zzDfz8/ODu7o5+/fpZ3E+/fv3wP//zP3j77behVCpx33334fz58/jzn/+MwMBAPP/88wAAT09PLF26FK+//jqefvppPProo8jJyUFcXBxPBRJZgMGKiNrVk08+2ej2Dz74AE8//XSzr125ciVSU1Px5JNPoqSkBMOGDUNCQgJ69+4t1cyYMQNubm6Ij4/H448/DgcHBwwfPhy7d+9GeHi42fHGjx8PpVIJtVqNESNGSNsnTpyIDRs2YNy4cWaT4K3Nzc0NP/74I1atWoX3338f586dg1qtRo8ePTBx4kSzEau///3vmD9/Pn7/+99LyzS0ZHI7ALz77rvo3bs3PvroI7zzzjvQaDSYMmUK4uPjzeZovfrqq3Bzc8P69evxr3/9C/3798d7772HN99800qfnKjjUogbx8uJiIiIqNU4x4qIiIjIShisiIiIiKyEwYqIiIjIShisiIiIiKyEwYqIiIjIShisiIiIiKyE61i1M5PJhNzcXLi7u5vdTJaIiIhslxACpaWl0Ol0za5vx2DVznJzcxEYGCh3G0RERNQKOTk5De63eSMGq3bm7u4OoO4fjIeHh8zdEBERkSVKSkoQGBgo/Y43hcGqndWf/vPw8GCwIiIisjO3msbDyetEREREVsJgRURERGQlDFZEREREVsJgRURERGQlDFZEREREVsJgRURERGQlDFZEREREVsJgRURERGQlDFZEREREVsKV1zsAo0ng0LmrKCitgo+7C4YFe8FByRs8ExERtTcGKzuXlJmHFd8cR56+Strmp3HB8uiBmBLiJ2NnREREnQ9PBdqxpMw8PPPZz2ahCgDy9VV45rOfkZSZJ1NnREREnRODlZ0ymgRWfHMcopF99dtWfHMcRlNjFURERNQWGKzs1KFzVxuMVN1IAMjTV+HQuavt1xQREVEnx2BlpwpKmw5VrakjIiKi28dgZad83F2sWkdERES3T9ZgFR8fj3vuuQfu7u7w8fHBgw8+iFOnTpnVzJkzBwqFwuwxfPhws
xqDwYAFCxbA29sbbm5umDZtGi5cuGBWU1RUhJiYGGg0Gmg0GsTExKC4uNisJjs7G9HR0XBzc4O3tzcWLlyI6upqs5qMjAxERERArVbD398fr776KoRo/3lMw4K94KdxQVOLKihQd3XgsGCv9myLiIioU5M1WO3duxfz589HSkoKduzYgdraWkRGRqK8vNysbsqUKcjLy5MeW7duNdu/aNEiJCYmIiEhAfv370dZWRmioqJgNBqlmhkzZiA9PR1JSUlISkpCeno6YmJipP1GoxFTp05FeXk59u/fj4SEBGzevBlLliyRakpKSjBp0iTodDqkpqbi7bffxptvvom1a9e20TfUNAelAsujBwJAg3BV/3x59ECuZ0VERNSehA0pKCgQAMTevXulbbNnzxYPPPBAk68pLi4WTk5OIiEhQdp28eJFoVQqRVJSkhBCiOPHjwsAIiUlRapJTk4WAMTJkyeFEEJs3bpVKJVKcfHiRalm06ZNQqVSCb1eL4QQYv369UKj0YiqqiqpJj4+Xuh0OmEymSz6jHq9XgCQjnm7tmXkiuErfxBBy76VHsP+ukNsy8i1yvGJiIjI8t9vm5pjpdfrAQBeXuanr/bs2QMfHx/07dsX8+bNQ0FBgbQvLS0NNTU1iIyMlLbpdDqEhITgwIEDAIDk5GRoNBrce++9Us3w4cOh0WjMakJCQqDT6aSayZMnw2AwIC0tTaqJiIiASqUyq8nNzcX58+cb/UwGgwElJSVmD2uaEuKH/cvGY9O84fBxr+tr5YOhXByUiIhIBjYTrIQQWLx4MUaNGoWQkBBp+3333YfPP/8cu3btwltvvYXU1FSMHz8eBoMBAJCfnw9nZ2d4enqaHc/X1xf5+flSjY+PT4P39PHxMavx9fU12+/p6QlnZ+dma+qf19fcLD4+XprXpdFoEBgYaPF3YikHpQIjenfDyDu8AQAZuXqrvwcRERHdms0Eq9jYWBw9ehSbNm0y2/74449j6tSpCAkJQXR0NLZt24bTp0/ju+++a/Z4QggoFNfnF934d2vWiGsT1xt7LQC89NJL0Ov10iMnJ6fZvm/H4AANACDjAoMVERGRHGwiWC1YsABff/01du/ejYCAgGZr/fz8EBQUhDNnzgAAtFotqqurUVRUZFZXUFAgjSZptVpcunSpwbEuX75sVnPzqFNRURFqamqarak/LXnzSFY9lUoFDw8Ps0dbGRzQFQDwywW9LFcqEhERdXayBishBGJjY/HVV19h165dCA4OvuVrCgsLkZOTAz+/ujlEQ4YMgZOTE3bs2CHV5OXlITMzE+Hh4QCAESNGQK/X49ChQ1LNwYMHodfrzWoyMzORl3f9/nrbt2+HSqXCkCFDpJp9+/aZLcGwfft26HQ69OzZs/VfhJUM0nnAUanAlTJDs6uyExERUduQNVjNnz8fn332Gb744gu4u7sjPz8f+fn5qKysBACUlZVh6dKlSE5Oxvnz57Fnzx5ER0fD29sbDz30EABAo9HgqaeewpIlS7Bz504cOXIEs2bNQmhoKCZOnAgAGDBgAKZMmYJ58+YhJSUFKSkpmDdvHqKiotCvXz8AQGRkJAYOHIiYmBgcOXIEO3fuxNKlSzFv3jxplGnGjBlQqVSYM2cOMjMzkZiYiJUrV2Lx4sVNngpsTy5ODujr6w4AOHqhWN5miIiIOqO2vjyxOai7pV2Dx4YNG4QQQlRUVIjIyEjRvXt34eTkJHr06CFmz54tsrOzzY5TWVkpYmNjhZeXl1Cr1SIqKqpBTWFhoZg5c6Zwd3cX7u7uYubMmaKoqMisJisrS0ydOlWo1Wrh5eUlYmNjzZZWEEKIo0ePitGjRwuVSiW0Wq2Ii4uzeKkFIay/3MLNXtz8iwha9q1Yte1EmxyfiIioM7L091shBCfjtKeSkhJoNBro9fo2mW/1xcFsvJyYgZF3dMPnTw+/9QuIiIjoliz9/baJyetkPfVXBh7lBHYiIqJ2x2DVwfTTukPlqERpV
S3OF1bI3Q4REVGnwmDVwTg5KDFQVzdEyQnsRERE7YvBqgMKq1/PKocLhRIREbUnBqsO6Po8q2J5GyEiIupkGKw6oPpglZmrR63RJHM3REREnQeDVQfUy7sLuqgcUVVjwpmCMrnbISIi6jQYrDogpVKBEP+6Cey8ITMREVH7YbDqoKQJ7JxnRURE1G4YrDqowdeC1VGOWBEREbUbBqsOqn4C+8n8EhhqjTJ3Q0RE1DkwWHVQAZ5qeLk5o8YocCKvVO52iIiIOgUGqw5KoVAg1J/rWREREbUnBqsOLOyGGzITERFR22Ow6sCuT2AvlrUPIiKizoLBqgMbHFg3YvVrQRnKDbUyd0NERNTxMVh1YD7uLvDTuMAkgMyLPB1IRETU1hisOrjBnGdFRETUbhisOrjBXIGdiIio3TBYdXAcsSIiImo/DFYd3GD/rgCA7KsVKK6olrcZIiKiDo7BqoPTuDqhZzdXABy1IiIiamsMVp0A17MiIiJqHwxWnUD9PKtfOGJFRETUphisOgGOWBEREbUPBqtOIMTfA0oFcKnEgEslVXK3Q0RE1GExWHUCrs6O6OPjDoAT2ImIiNoSg1UncX09q2J5GyEiIurAGKw6icGBXQFwAjsREVFbYrDqJMJuGLESQsjcDRERUcfEYNVJ9NO6w9lBieKKGuRcrZS7HSIiog6JwaqTUDk6oL9f3QR23pCZiIiobTBYdSKcwE5ERNS2GKw6kesLhXICOxERUVtgsOpEwq4Fq8yLehhNnMBORERkbQxWncgdPl3g6uyA8mojzl4uk7sdIiKiDofBqhNxUCoQouMNmYmIiNoKg1UnE8oJ7ERERG2GwaqTqb8ykCNWRERE1sdg1cnUT2A/kVeC6lqTvM0QERF1MAxWnUxQN1do1E6orjXh9KVSudshIiLqUBisOhmFQnHD6cBieZshIiLqYBisOiFpBfYczrMiIiKyJgarTijUvysAjlgRERFZG4NVJxQWWDdidaagDJXVRpm7ISIi6jgYrDohrYcLururYDQJHMvl6UAiIiJrYbDqhBQKBcKkhUIZrIiIiKyFwaqTGnxtPSuuwE5ERGQ9DFad1GCOWBEREVkdg1UnVT9idfZKOfSVNfI2Q0RE1EEwWHVSXm7OCPBUAwAyL3LUioiIyBoYrDqx+vsGcj0rIiIi62Cw6sTq51llcJ4VERGRVTBYdWLXrwxksCIiIrIGBqtOLDRAA4UCuFhciStlBrnbISIisnsMVp1YF5UjenfvAoDrWREREVmDrMEqPj4e99xzD9zd3eHj44MHH3wQp06dMqsRQiAuLg46nQ5qtRpjx47FsWPHzGoMBgMWLFgAb29vuLm5Ydq0abhw4YJZTVFREWJiYqDRaKDRaBATE4Pi4mKzmuzsbERHR8PNzQ3e3t5YuHAhqqurzWoyMjIQEREBtVoNf39/vPrqqxBCWO9LaWeD/evmWf2Sw9OBREREt0vWYLV3717Mnz8fKSkp2LFjB2praxEZGYny8nKpZs2aNVi7di3WrVuH1NRUaLVaTJo0CaWlpVLNokWLkJiYiISEBOzfvx9lZWWIioqC0Xj9BsMzZsxAeno6kpKSkJSUhPT0dMTExEj7jUYjpk6divLycuzfvx8JCQnYvHkzlixZItWUlJRg0qRJ0Ol0SE1Nxdtvv40333wTa9eubeNvqu1cXyi0WN5GiIiIOgJhQwoKCgQAsXfvXiGEECaTSWi1WrFq1SqppqqqSmg0GvHee+8JIYQoLi4WTk5OIiEhQaq5ePGiUCqVIikpSQghxPHjxwUAkZKSItUkJycLAOLkyZNCCCG2bt0qlEqluHjxolSzadMmoVKphF6vF0IIsX79eqHRaERVVZVUEx8fL3Q6nTCZTBZ9Rr1eLwBIx5RbWtZVEbTsW3H3q9st/gxERESdjaW/3zY1x0qvrzsd5eXlBQA4d+4c8vPzERkZKdWoVCpERETgwIEDAIC0tDTU1NSY1eh0OoSEhEg1ycnJ0
Gg0uPfee6Wa4cOHQ6PRmNWEhIRAp9NJNZMnT4bBYEBaWppUExERAZVKZVaTm5uL8+fPW/OraDcD/TzgqFSgsLwaufoqudshIiKyazYTrIQQWLx4MUaNGoWQkBAAQH5+PgDA19fXrNbX11fal5+fD2dnZ3h6ejZb4+Pj0+A9fXx8zGpufh9PT084Ozs3W1P/vL7mZgaDASUlJWYPW+Li5IB+WncAwNGcYnmbISIisnM2E6xiY2Nx9OhRbNq0qcE+hUJh9lwI0WDbzW6uaazeGjXi2sT1pvqJj4+XJsxrNBoEBgY227ccBksrsHMCOxER0e2wiWC1YMECfP3119i9ezcCAgKk7VqtFkDD0aCCggJppEir1aK6uhpFRUXN1ly6dKnB+16+fNms5ub3KSoqQk1NTbM1BQUFABqOqtV76aWXoNfrpUdOTk4z34Q8OIGdiIjIOmQNVkIIxMbG4quvvsKuXbsQHBxstj84OBharRY7duyQtlVXV2Pv3r0IDw8HAAwZMgROTk5mNXl5ecjMzJRqRowYAb1ej0OHDkk1Bw8ehF6vN6vJzMxEXl6eVLN9+3aoVCoMGTJEqtm3b5/ZEgzbt2+HTqdDz549G/2MKpUKHh4eZg9bc+OtbUwm+106goiISHZtPYu+Oc8884zQaDRiz549Ii8vT3pUVFRINatWrRIajUZ89dVXIiMjQ0yfPl34+fmJkpISqeaPf/yjCAgIED/88IP4+eefxfjx40VYWJiora2VaqZMmSIGDx4skpOTRXJysggNDRVRUVHS/traWhESEiImTJggfv75Z/HDDz+IgIAAERsbK9UUFxcLX19fMX36dJGRkSG++uor4eHhId58802LP7OtXRUohBDVtUbR95WtImjZt+LXglK52yEiIrI5lv5+yxqsADT62LBhg1RjMpnE8uXLhVarFSqVSowZM0ZkZGSYHaeyslLExsYKLy8voVarRVRUlMjOzjarKSwsFDNnzhTu7u7C3d1dzJw5UxQVFZnVZGVlialTpwq1Wi28vLxEbGys2dIKQghx9OhRMXr0aKFSqYRWqxVxcXEtWqbAFoOVEEI89M5+EbTsW5H48wW5WyEiIrI5lv5+K4Sw42XD7VBJSQk0Gg30er1NnRaM+/oYPjlwHk+O7Inl0YPkboeIiMimWPr7bROT10l+YYH1E9h5ZSAREVFrMVgRACDUvysA4FiuHrVGk7zNEBER2SkGKwIA9PJ2g7vKEVU1Jpy+VCZ3O0RERHaJwYoAAEqlAiH+XM+KiIjodjBYkWTwtXlWXIGdiIiodRisSBJ27dY2GReLZe2DiIjIXjFYkaR+BfaTeaWoqjHK3A0REZH9YbAiiX9XNbq5OaPWJHAir0TudoiIiOwOgxVJFAoFQgO4nhUREVFrMViRmcHX5ln9wisDiYiIWozBisyEccSKiIio1RisyEz9iNVvl8tQZqiVtxkiIiI7w2BFZrq7q6DTuEAIIPMiR62IiIhagsGKGqgfteIK7ERERC3DYEUN1F8ZyBXYiYiIWobBihoI44gVERFRqzBYUQP1I1Y5Vytxtbxa5m6IiIjsB4MVNaBROyHY2w0AR62IiIhagsGKGlV/38AMzrMiIiKyGIMVNer6CuwMVkRERJZisKJGDZZWYC+WtxEiIiI7wmBFjRqk84BSARSUGpCvr5K7HSIiIrvAYEWNcnV2RF9fdwC8ITMREZGlGKyoSTwdSERE1DIMVtSk67e24QR2IiIiSzBYUZPqV2DPuKiHEELeZoiIiOwAgxU1qZ/WHc4OShRX1CD7aoXc7RAREdk8BitqkrOjEgP86iew83QgERHRrTBYUbOkeVY5xbL2QUREZA8YrKhZ168M5IgVERHRrTBYUbPCArsCADJz9TCaOIGdiIioOQxW1Kze3bvA1dkBFdVG/Ha5TO52iIiIbBqDFTXLQalAiH/d6cBfOM+KiIioWQxWdEuD/TnPioiIyBIMVnRLg6/Ns+KtbYiIiJrHY
EW3FHbtysATeaWorjXJ3A0REZHtYrCiW+rh5Yqurk6oNppwMr9E7naIiIhsFoMV3ZJCoUAo51kRERHdEoMVWaT+hsycZ0VERNQ0BiuySChXYCciIrolBiuySP2I1elLpaiorpW3GSIiIhvFYEUW0Wpc4OOugkkAx3I5gZ2IiKgxDFZkscHXRq24AjsREVHjGKzIYvXrWWVc5DwrIiKixjBYkcWur8DOYEVERNQYBiuyWP1aVueulENfWSNzN0RERLaHwYos5uXmjEAvNQAgg6NWREREDTBYUYtIE9i5UCgREVEDDFbUImHSQqHF8jZCRERkgxisqEUGS7e24alAIiKimzFYUYuE+GugUAB5+ioUlFbJ3Q4REZFNYbCiFumicsQd3bsA4AR2IiKimzFYUYvV35D5FwYrIiIiMwxW1GJh0jyrYln7ICIisjUMVtRig6UrA/UQQsjcDRERke1gsKIWG+DnAUelAlfLq3GhqFLudoiIiGwGgxW1mIuTA/r7uQPgDZmJiIhuJGuw2rdvH6Kjo6HT6aBQKLBlyxaz/XPmzIFCoTB7DB8+3KzGYDBgwYIF8Pb2hpubG6ZNm4YLFy6Y1RQVFSEmJgYajQYajQYxMTEoLi42q8nOzkZ0dDTc3Nzg7e2NhQsXorq62qwmIyMDERERUKvV8Pf3x6uvvtppT4VxBXYiIqKGZA1W5eXlCAsLw7p165qsmTJlCvLy8qTH1q1bzfYvWrQIiYmJSEhIwP79+1FWVoaoqCgYjUapZsaMGUhPT0dSUhKSkpKQnp6OmJgYab/RaMTUqVNRXl6O/fv3IyEhAZs3b8aSJUukmpKSEkyaNAk6nQ6pqal4++238eabb2Lt2rVW/Ebsx+BrN2Q+msMRKyIiIomwEQBEYmKi2bbZs2eLBx54oMnXFBcXCycnJ5GQkCBtu3jxolAqlSIpKUkIIcTx48cFAJGSkiLVJCcnCwDi5MmTQgghtm7dKpRKpbh48aJUs2nTJqFSqYRerxdCCLF+/Xqh0WhEVVWVVBMfHy90Op0wmUwWf069Xi8ASMe1V8cu6kXQsm9FyF+ShNFo+ecnIiKyR5b+ftv8HKs9e/bAx8cHffv2xbx581BQUCDtS0tLQ01NDSIjI6VtOp0OISEhOHDgAAAgOTkZGo0G9957r1QzfPhwaDQas5qQkBDodDqpZvLkyTAYDEhLS5NqIiIioFKpzGpyc3Nx/vz5Jvs3GAwoKSkxe3QEfX27wMVJiVJDLc5eKZe7HSIiIptg08Hqvvvuw+eff45du3bhrbfeQmpqKsaPHw+DwQAAyM/Ph7OzMzw9Pc1e5+vri/z8fKnGx8enwbF9fHzManx9fc32e3p6wtnZudma+uf1NY2Jj4+X5nZpNBoEBga25CuwWY4OSgzS8YbMREREN7LpYPX4449j6tSpCAkJQXR0NLZt24bTp0/ju+++a/Z1QggoFArp+Y1/t2aNuDZxvbHX1nvppZeg1+ulR05OTrO925Mb17MiIiIiGw9WN/Pz80NQUBDOnDkDANBqtaiurkZRUZFZXUFBgTSapNVqcenSpQbHunz5slnNzaNORUVFqKmpabam/rTkzSNZN1KpVPDw8DB7dBRcgZ2IiMicXQWrwsJC5OTkwM/PDwAwZMgQODk5YceOHVJNXl4eMjMzER4eDgAYMWIE9Ho9Dh06JNUcPHgQer3erCYzMxN5eXlSzfbt26FSqTBkyBCpZt++fWZLMGzfvh06nQ49e/Zss89sy+rvGXgstwQ1RpPM3RAREclP1mBVVlaG9PR0pKenAwDOnTuH9PR0ZGdno6ysDEuXLkVycjLOnz+PPXv2IDo6Gt7e3njooYcAABqNBk899RSWLFmCnTt34siRI5g1axZCQ0MxceJEAMCAAQMwZcoUzJs3DykpKUhJScG8efMQFRWFfv36AQAiIyMxcOBAxMTE4MiRI9i5cyeWLl2KefPmSSNMM2bMgEqlwpw5c5CZmYnExESsXLkSi
xcvbvZUYEcW3M0N7ipHGGpNOH2pVO52iIiI5NcOVyg2affu3QJAg8fs2bNFRUWFiIyMFN27dxdOTk6iR48eYvbs2SI7O9vsGJWVlSI2NlZ4eXkJtVotoqKiGtQUFhaKmTNnCnd3d+Hu7i5mzpwpioqKzGqysrLE1KlThVqtFl5eXiI2NtZsaQUhhDh69KgYPXq0UKlUQqvViri4uBYttSBEx1luod7095NF0LJvxRcHs+RuhYiIqM1Y+vutEKKTLh0uk5KSEmg0Guj1+g4x32rVtpN4b+9vmD4sEPEPD5a7HSIiojZh6e+3Xc2xItsTdm2e1S9cgZ2IiIjBim7P4MCuAIDTl0pRVWNsvpiIiKiDu+1glZWVhePHj8Nk4lVhnZFO4wLvLs6oNQkcz+sYq8oTERG1lsXBauPGjfjb3/5mtu1//ud/0KtXL4SGhiIkJKRDLX5JllEoFAiVbshcLG8zREREMrM4WL333nvQaDTS86SkJGzYsAGffvopUlNT0bVrV6xYsaJNmiTbNlhaKJTzrIiIqHNztLTw9OnTGDp0qPT8v//9L6ZNm4aZM2cCAFauXIknn3zS+h2SzQsLvDaBnSuwExFRJ2fxiFVlZaXZ5YUHDhzAmDFjpOe9evVq9mbE1HHVj1idvVKO0qoaeZshIiKSkcXBKigoCGlpaQCAK1eu4NixYxg1apS0Pz8/3+xUIXUe3l1U8O+qhhBAxkWeDiQios7L4lOBTzzxBObPn49jx45h165d6N+/v3QfPaBuBCskJKRNmiTbNzhAg4vFlci4oEd4b2+52yEiIpKFxcFq2bJlqKiowFdffQWtVot///vfZvt/+uknTJ8+3eoNkn0IDdBgW2Y+J7ATEVGnxlvatLOOdkubej/9egUzPzyIAE819i8bL3c7REREVmXp77fFI1aNqaqqwpdffony8nJERkbijjvuuJ3DkR0LubaW1YWiShSWGdCti0rmjoiIiNqfxZPXX3jhBTz33HPS8+rqaowYMQLz5s3Dyy+/jDvvvBPJyclt0iTZPo3aCb283QAARzmBnYiIOimLg9W2bdswYcIE6fnnn3+OrKwsnDlzBkVFRXj00Ufx+uuvt0mTZB8GB9SvwM5gRUREnZPFwSo7OxsDBw6Unm/fvh2PPPIIgoKCoFAo8Nxzz+HIkSNt0iTZh/r1rDIuFsvaBxERkVwsDlZKpRI3znNPSUnB8OHDpeddu3ZFUVGRdbsju1I/YvXLBT14TQQREXVGFger/v3745tvvgEAHDt2DNnZ2Rg3bpy0PysrC76+vtbvkOzGIJ0GDkoFLpcakF9SJXc7RERE7c7iqwJfeOEFTJ8+Hd999x2OHTuG+++/H8HBwdL+rVu3YtiwYW3SJNkHtbMD+vh0wcn8UvySo4efRi13S0RERO3K4hGr3/3ud9i6dSsGDx6M559/Hl9++aXZfldXVzz77LNWb5DsS9i1eVZHeUNmIiLqhFq0jtXEiRMxceLERvctX77cKg2RfRscqMGXh3O4AjsREXVKFo9YnTlzBtOnT0dJSUmDfXq9HjNmzMDZs2et2hzZnxtHrDiBnYiIOhuLg9Ubb7yBwMDARpdx12g0CAwMxBtvvGHV5sj+9PV1h7OjEiVVtcgqrJC7HSIionZlcbDat28fHn300Sb3P/bYY9i1a5dVmiL75eyoxAC/uvD9C+dZERFRJ2NxsMrKyoKPj0+T+729vZGTk2OVpsi+hdWvwM55VkRE1MlYHKw0Gg1+++23Jvf/+uuvzd7tmTqPwbwykIiIOimLg9WYMWPw9ttvN7n/H//4B0aPHm2Vpsi+1Y9YZV4sQa3RJHM3RERE7cfiYPXSSy9h27ZteOSRR3Do0CHo9Xro9XocPHgQv/vd7/D999/jpZdeasteyU706t4Fbs4OqKwx4tfLZXK3Q0RE1G4sXsfqrrvuwn/+8x/MnTsXiYmJZvu6deuG//u//8Pdd99t9QbJ/jgoFQjx1+Dguas4ekGP/
lqeIiYios7B4mD166+/IioqCllZWfj+++9x5swZCCHQt29fREZGwtXVtS37JDszOKA+WBXjsaGBcrdDRETULiwOVn379oW/vz/GjRuHcePG4dFHH0XPnj3bsDWyZ9cnsPPKQCIi6jwsDlZ79+7F3r17sWfPHsTGxqKqqgo9evTA+PHjpbDl7+/flr2SHalfgf1EXgkMtUaoHB3kbYiIiKgdKEQr7jtSU1OD5ORk7NmzB3v27EFKSgoMBgPuuOMOnDp1qi367DBKSkqg0Wig1+s79PIUQgjc/doOFFXU4L/zRyIssKvcLREREbWapb/fLboJcz0nJyeMGTMG99xzD0aMGIHvv/8eH3zwAX799ddWN0wdi0KhQGhAV+w7fRlHLxQzWBERUadg8XILAFBVVYVdu3bhz3/+M0aPHg1PT08sXLgQZWVlePfdd5Gdnd1WfZIdql/P6hfOsyIiok7C4hGriIgIpKamonfv3hgzZgwWLFiAiIgI+Pr6tmV/ZMdC/euCVQaDFRERdRIWj1gdOHAA3t7eGDduHCZMmIDx48czVFGz6k//nSkoRUV1rbzNEBERtQOLg1VxcTHef/99uLq6YvXq1fD390doaChiY2Pxn//8B5cvX27LPskO+Xq4wNdDBZOou70NERFRR2dxsHJzc8OUKVOwatUqHDx4EFeuXMGaNWvg6uqKNWvWICAgACEhIW3ZK9kh3pCZiIg6kxZNXr+Rm5sbvLy84OXlBU9PTzg6OuLEiRPW7I06AE5gJyKizsTiyesmkwmHDx/Gnj17sHv3bvz0008oLy+XVmN/5513MG7cuLbslewQR6yIiKgzsThYde3aFeXl5fDz88PYsWOxdu1ajBs3Dr17927L/sjO1V8ZmFVYAX1FDTSuTjJ3RERE1HYsDlZvvPEGxo0bh759+7ZlP9TBeLo5o4eXK7KvVuDoxWKM7tNd7paIiIjajMVzrP7whz8wVFGrDL42z4o3ZCYioo6u1ZPXiSxVf0PmX3KKZe2DiIiorTFYUZvjiBUREXUWDFbU5kL8NVAqgPySKhSUVMndDhERUZthsKI256ZyxB0+XQBwPSsiIurYGKyoXYT6dwUAZHA9KyIi6sAYrKhdhAVyBXYiIur4GKyoXdy4ArsQQt5miIiI2giDFbWLAX7ucHJQoKiiBheKKuVuh4iIqE0wWFG7UDk6oL/WAwDwC+dZERFRB8VgRe2G61kREVFHx2BF7eZ6sCqWtxEiIqI2wmBF7aZ+AnvmxRKYTJzATkREHQ+DFbWbPj5d4OKkRJmhFmevlMndDhERkdXJGqz27duH6Oho6HQ6KBQKbNmyxWy/EAJxcXHQ6XRQq9UYO3Ysjh07ZlZjMBiwYMECeHt7w83NDdOmTcOFCxfMaoqKihATEwONRgONRoOYmBgUFxeb1WRnZyM6Ohpubm7w9vbGwoULUV1dbVaTkZGBiIgIqNVq+Pv749VXX+XSAS3g6KBEiO7aelY5nGdFREQdj6zBqry8HGFhYVi3bl2j+9esWYO1a9di3bp1SE1NhVarxaRJk1BaWirVLFq0CImJiUhISMD+/ftRVlaGqKgoGI1GqWbGjBlIT09HUlISkpKSkJ6ejpiYGGm/0WjE1KlTUV5ejv379yMhIQGbN2/GkiVLpJqSkhJMmjQJOp0OqampePvtt/Hmm29i7dq1bfDNdFw3rmdFRETU4QgbAUAkJiZKz00mk9BqtWLVqlXStqqqKqHRaMR7770nhBCiuLhYODk5iYSEBKnm4sWLQqlUiqSkJCGEEMePHxcAREpKilSTnJwsAIiTJ08KIYTYunWrUCqV4uLFi1LNpk2bhEqlEnq9XgghxPr164VGoxFVVVVSTXx8vNDpdMJkMln8OfV6vQAgHbez2XLkggha9q14YN1+uVshIiKymKW/3zY7x+rcuXPIz89HZGSktE2lUiEiIgIHDhwAAKSlpaGmpsasRqfTISQkRKpJTk6GRqPBvffeK9UMHz4cG
o3GrCYkJAQ6nU6qmTx5MgwGA9LS0qSaiIgIqFQqs5rc3FycP3++yc9hMBhQUlJi9ujMQv3rTgUezytBda1J5m6IiIisy2aDVX5+PgDA19fXbLuvr6+0Lz8/H87OzvD09Gy2xsfHp8HxfXx8zGpufh9PT084Ozs3W1P/vL6mMfHx8dLcLo1Gg8DAwOY/eAfXs5sb3F0cUV1rwulLpbd+ARERkR2x2WBVT6FQmD0XQjTYdrObaxqrt0aNuDZxvbl+XnrpJej1eumRk5PTbO8dnVKp4EKhRETUYdlssNJqtQAajgYVFBRII0VarRbV1dUoKipqtubSpUsNjn/58mWzmpvfp6ioCDU1Nc3WFBQUAGg4qnYjlUoFDw8Ps0dnxwnsRETUUdlssAoODoZWq8WOHTukbdXV1di7dy/Cw8MBAEOGDIGTk5NZTV5eHjIzM6WaESNGQK/X49ChQ1LNwYMHodfrzWoyMzORl5cn1Wzfvh0qlQpDhgyRavbt22e2BMP27duh0+nQs2dP638BHVjYtRGrXzhiRUREHYyswaqsrAzp6elIT08HUDdhPT09HdnZ2VAoFFi0aBFWrlyJxMREZGZmYs6cOXB1dcWMGTMAABqNBk899RSWLFmCnTt34siRI5g1axZCQ0MxceJEAMCAAQMwZcoUzJs3DykpKUhJScG8efMQFRWFfv36AQAiIyMxcOBAxMTE4MiRI9i5cyeWLl2KefPmSSNMM2bMgEqlwpw5c5CZmYnExESsXLkSixcvvuWpSTJXP2J1+lIpKquNzRcTERHZk7a/QLFpu3fvFgAaPGbPni2EqFtyYfny5UKr1QqVSiXGjBkjMjIyzI5RWVkpYmNjhZeXl1Cr1SIqKkpkZ2eb1RQWFoqZM2cKd3d34e7uLmbOnCmKiorMarKyssTUqVOFWq0WXl5eIjY21mxpBSGEOHr0qBg9erRQqVRCq9WKuLi4Fi21IASXWxCi7p/rkNd2iKBl34rD5wvlboeIiOiWLP39VgjBpcPbU0lJCTQaDfR6faeebzX3k1TsOlmA5dED8eTIYLnbISIiapalv982O8eKOjZeGUhERB0RgxXJIuzaPKtfeGUgERF1IAxWJIv6Eauzl8tRUlUjczdERETWwWBFsujWRQX/rmoAQCZPBxIRUQfBYEWyCQvkelZERNSxMFiRbEL9uwLgCuxERNRxMFiRbMJ4ZSAREXUwDFYkm5BrwepicSUKywwyd0NERHT7GKxINh4uTujV3Q0AR62IiKhjYLAiWXE9KyIi6kgYrEhWXIGdiIg6EgYrktX1YFUM3raSiIjsHYMVyWqgnwYOSgWulFUjT18ldztERES3hcGKZKV2dkBfX3cAXM+KiIjsH4MVya5+PSuuwE5ERPaOwYpkN/jalYEcsSIiInvHYEWyu/HKQJOJE9iJiMh+MViR7Ppp3eHsqERpVS3OF5bL3Q4REVGrMViR7JwclBjo5wEAyLjIeVZERGS/GKzIJkgT2HMYrIiIyH4xWJFN4AR2IiLqCBisyCaEBdaNWGXm6lFrNMncDRERUeswWJFN6OXdBV1UjqiqMeFMQZnc7RAREbUKgxXZBKVSgUG6uhXYP00+j+TfCmHk0gtERGRnHOVugAgAkjLzkHmxBACw6VAONh3KgZ/GBcujB2JKiJ/M3REREVmGI1Yku6TMPDzz2c8orzaabc/XV+GZz35GUmaeTJ0RERG1DIMVycpoEljxzXE0dtKvftuKb47ztCAREdkFBiuS1aFzV5Gnr2pyvwCQp6/CoXNX268pIiKiVmKwIlkVlDYdqlpTR0REJCcGK5KVj7uLRXVdXZ3auBMiIqLbx2BFshoW7AU/jQsUt6hb+d0JnLlU2i49ERERtRaDFcnKQanA8uiBANAgXNU/76JyxKlLZYh6ez/+lXweQnAiOxER2SYGK5LdlBA/vDvrbmg15qcFtRoXvDfrbuxaGoGIvt1hqDXhz/89hqc3HkZhmUGmbomIiJqmE
PzP/3ZVUlICjUYDvV4PDw8PuduxKUaTwKFzV1FQWgUfdxcMC/aCg7Ju3MpkEvjkwHms2nYS1UYTvLuo8NZjYYjo213mromIqDOw9PebwaqdMVjdnhN5JXgu4QhOX6q7n+DckcH405R+cHFykLkzIiLqyCz9/eapQLIrA/w88HXsKMweEQQA+Pinc3jwnZ84sZ2IiGwCgxXZHRcnB6x4IAQfzxmKbm7OOJlfyontRERkExisyG6N7++LbYtGm01sn/cpJ7YTEZF8GKzIrvm4u2DDnHvwl6iBcHZQ4ocTBZj8tx+x9/RluVsjIqJOiMGK7J5SqcDcUcH4b+xI9PHpgitlBsz++BBe/eY4DLVGudsjIqJOhMGKOowBfh74ZsEoPHHDxPYH1nFiOxERtR8GK+pQXJwc8OoDIfhoNie2ExFR+2Owog5pwgBObCciovbHYEUdVv3E9j9zYjsREbUTBivq0JRKBZ7ixHYiImonDFbUKXBiOxERtQcGK+o0bpzY7sWJ7URE1AYYrKjTmTDAF0mLRmMMJ7YTEZGVMVhRp+Tj7oJPbprYPuXvP2IfJ7YTEdFtYLCiTqt+YvuW+XUT2y+XGvAEJ7YTEdFtYLCiTm+grm5ie8zw6xPbH3znACe2ExFRizFYEaFuYvtrD16f2H4ir4QT24mIqMUYrIhuwIntRER0OxisiG7Cie1ERNRaDFZEjWhqYvtr33JiOxERNY3BiqgZN09s/2g/J7YTEVHTGKyIbqF+YvuHT9w0sT0lixPbiYjIjE0Hq7i4OCgUCrOHVquV9gshEBcXB51OB7VajbFjx+LYsWNmxzAYDFiwYAG8vb3h5uaGadOm4cKFC2Y1RUVFiImJgUajgUajQUxMDIqLi81qsrOzER0dDTc3N3h7e2PhwoWorq5us89OtmfiQF8kPTcao/t4101s35LJie1ERGTGpoMVAAwaNAh5eXnSIyMjQ9q3Zs0arF27FuvWrUNqaiq0Wi0mTZqE0tLrp2kWLVqExMREJCQkYP/+/SgrK0NUVBSMxuvzZGbMmIH09HQkJSUhKSkJ6enpiImJkfYbjUZMnToV5eXl2L9/PxISErB582YsWbKkfb4Eshk+Hi7Y+OSwZie2G00Cyb8V4r/pF5H8WyGMJo5qERF1Fgphw+cy4uLisGXLFqSnpzfYJ4SATqfDokWLsGzZMgB1o1O+vr5YvXo1/vCHP0Cv16N79+7417/+hccffxwAkJubi8DAQGzduhWTJ0/GiRMnMHDgQKSkpODee+8FAKSkpGDEiBE4efIk+vXrh23btiEqKgo5OTnQ6XQAgISEBMyZMwcFBQXw8PCw+DOVlJRAo9FAr9e36HVke47nluC5hCM4U1AGAHhqVDDuDNRg5daTyNNXSXV+Ghcsjx6IKSF+crVKRES3ydLfb5sfsTpz5gx0Oh2Cg4Px+9//HmfPngUAnDt3Dvn5+YiMjJRqVSoVIiIicODAAQBAWloaampqzGp0Oh1CQkKkmuTkZGg0GilUAcDw4cOh0WjMakJCQqRQBQCTJ0+GwWBAWlpas/0bDAaUlJSYPahjGKjzwNex5hPbF2xKNwtVAJCvr8Izn/2MpMw8OdokIqJ2ZNPB6t5778Wnn36K77//Hh988AHy8/MRHh6OwsJC5OfnAwB8fX3NXuPr6yvty8/Ph7OzMzw9PZut8fHxafDePj4+ZjU3v4+npyecnZ2lmqbEx8dLc7c0Gg0CAwNb8A2QrVM7101sf3/WECgUjdfUDwmv+OY4TwsSEXVwNh2s7rvvPvzud79DaGgoJk6ciO+++w4AsHHjRqlGcdOvmRCiwbab3VzTWH1rahrz0ksvQa/XS4+cnJxm68k+uaud0NxJdQEgT1+FQ+eutltPRETU/mw6WN3Mzc0NoaGhOHPmjHR14M0jRgUFBdLoklarRXV1NYqKipqtuXTpUoP3unz5slnNze9TVFSEmpqaBiNZN1OpVPDw8DB7UMdTUFp166IW1BERk
X2yq2BlMBhw4sQJ+Pn5ITg4GFqtFjt27JD2V1dXY+/evQgPDwcADBkyBE5OTmY1eXl5yMzMlGpGjBgBvV6PQ4cOSTUHDx6EXq83q8nMzERe3vU5Mtu3b4dKpcKQIUPa9DOTffBxd7GozsvVuY07ISIiOdl0sFq6dCn27t2Lc+fO4eDBg3jkkUdQUlKC2bNnQ6FQYNGiRVi5ciUSExORmZmJOXPmwNXVFTNmzAAAaDQaPPXUU1iyZAl27tyJI0eOYNasWdKpRQAYMGAApkyZgnnz5iElJQUpKSmYN28eoqKi0K9fPwBAZGQkBg4ciJiYGBw5cgQ7d+7E0qVLMW/ePI5AEQBgWLAX/DQuaP7EMPDKlgxszcjjwqJERB2Uo9wNNOfChQuYPn06rly5gu7du2P48OFISUlBUFDdVVh/+tOfUFlZiWeffRZFRUW49957sX37dri7u0vH+N///V84OjriscceQ2VlJSZMmIBPPvkEDg4OUs3nn3+OhQsXSlcPTps2DevWrZP2Ozg44LvvvsOzzz6LkSNHQq1WY8aMGXjzzTfb6ZsgW+egVGB59EA889nPUOD6hHUA0nMPF0dkX63Es5//jLt7dMUrUwdgSJCXPA0TEVGbsOl1rDoirmPVsSVl5mHFN8cbXcdqdJ/ueH/fWby/7ywqa+oWqL0vRItlU/qjp7ebXC0TEZEFLP39ZrBqZwxWHZ/RJHDo3FUUlFbBx90Fw4K94KC8fpKwoKQKa3ecxv8dzoFJAI5KBWYND8LCCX3g5cY5WEREtojBykYxWFG9U/mlWLXtBHafqrsdjrvKEfPH34E54T3h4uRwi1cTEVF7YrCyUQxWdLOffr2Cv353Asfz6lbl9++qxtLJffFAmD+UyltNhyciovbAYGWjGKyoMSaTQOKRi3hz+ylpflaIvwdevn8Awnt7y9wdERExWNkoBitqTlWNER//dA7rd/+GMkMtAGB8fx+8dF9/9PF1v8WriYiorTBY2SgGK7JEYZkB/9h5Bp8fzEatSUCpAB6/pween9TH4sVIiYjIehisbBSDFbXE2ctlWJ10Et8fq7vtkquzA/4wpjfmjQmGq7NNL0NHRNShMFjZKAYrao3U81fx+ncn8EtOMQDAx12FJZF98ciQQLOlHIiIqG0wWNkoBitqLSEEvj2ahzXfn0TO1UoAQD9fd7x4f3+M7dsdCgUDFhFRW2GwslEMVnS7DLVG/Cs5C2/v+hX6yhoAwKg7vPHS/f0xSKeRuTsioo6JwcpGMViRtegrarBu9xlsPJCFaqMJCgXw8F0BWDq5L/w0arnbIyLqUBisbBSDFVlbztUKrPn+FL75JRcAoHJU4qlRwXhmbG+4uzjJ3B0RUcfAYGWjGKyoraTnFGPldydw6PxVAEA3N2c8N7EPpg/rAScHpczdERHZNwYrG8VgRW1JCIEdxy9hVdJJnL1cDgDo5e2GZff1R+RAX05wJyJqJQYrG8VgRe2hxmhCwqFs/O2HMygsrwYA3NPTEy/fPwB39fCUuTsiIvvDYGWjGKyoPZVW1eCfe8/igx/PwlBrAgBEDfbDnyb3R49urjJ3R0RkPxisbBSDFckhT1+Jt7afxuafL0AIwMlBgdkjeiJ2/B3o6uosd3tERDaPwcpGMViRnI7nlmDl1hPY/+sVAIBG7YTYcXfgifAgqBwdZO6OiMh2MVjZKAYrkpsQAntPX0b81pM4dakUABDopcYLk/sjerCfNMHdaBI4dO4qCkqr4OPugmHBXrx9DhF1WgxWNorBimyF0SSwOe0C3tx+CgWlBgBAWGBXvHL/AFwtN2DFN8eRp6+S6v00LlgePRBTQvzkapmISDYMVjaKwYpsTUV1LT788Rze2/sbKqqNTdbVj1W9O+tuhisi6nQs/f3mqoFEnZyrsyMWTuiDPS+Mxe+HBTZZV/9fYCu+OQ6jif89RkTUGAYrIgIA+Li74IEw/2ZrBIA8fRUOnbvaPk0REdkZR7kbICLbUVBad
esiAC9uPorIQb4Y2tMLQ4M80a2Lqo07IyKyDwxWRCTxcXexqC7ragU++PEcPvjxHACgV3c33BPkhaE9PTG0pxd6dnPl7XOIqFNisCIiybBgL/hpXJCvr0Jjs6gUALq7q7BsSj+kZRfj8PmrOH2pDGcvl+Ps5XJ8eTgHAODdxRlDrwWte3p6YaDOgzeCJqJOgVcFtjNeFUi2LikzD8989jMAmIWrpq4KLK6oRlpWEVLPFyEt6yp+ydGj2mgyO6bayQF39eiKoUF1I1p39egKdxenNv4kRETWw+UWbBSDFdmDpMy8Vq9jVVVjROZFPVLPF+Hw+as4nFUEfWWNWY1SAQzw88A9Pa+dPgzyglZj2WlIIiI5MFjZKAYrshfWWnndZBL49XIZDl8LWqlZV5FztbJBXYCnWgpa9/T0wh3du0DJld6JyEYwWNkoBisiIF9fhcNZV+vCVtZVHM8twc1LY2nUThgS5CkFrVB/DVyceD9DIpIHg5WNYrAiaqjMUIsj2UXS6cMj2cWorDFfBd7ZQYnBARppiYchQZ7wdHO+5bF5z0MisgYGKxvFYEV0azVGE07klUhBK/V8Ea6UGRrU9fHpgqE9vXDPtVGtAE+12TIPtzNXjIjoRgxWNorBiqjlhBDIKqxA6vnrpw9/u1zeoM7XQyUt81BTa0L8tpMNlo3gPQ+JqDUYrGwUgxWRdRSWGZCWVYTDWUVIPX8VmRf1qDFa9q8zBQCtxgX7l43naUEisgiDlY1isCJqG1U1RvySU4zDWUXYfjwfv+Tob/ma+0O0GN67G4K6uSHIyxX+nmouZEpEjWKwslEMVkRt77/pF/FcQnqLX+egVMC/qxpB3VzRw8sVPbu5oUc3V+m5qzNvVkHUWVn6+81/SxBRh2PpPQ+nhmpRbRTILqxA1tVyVNWYkH21AtlXK5o4rgpB3VylEa4e3erCV1A3V3R1vfUVikTU8TFYEVGHY8k9D7UaF/xj+t3SHCshBApKDcgqrEBWYXndn1crkF1YjvOFFdBX1qCg1ICCUgNSzxc1OKaHiyN6eruhh5erWfgK6uYGH3eVVRY75dIRRLaPpwLbGU8FErWPlt7z8Fb0FTXIuloXsrLrg9e1ka5LJQ2XgriRi5MSPbxc0cPLDT3rTy12q/u7rqtl87q4dASRvDjHykYxWBG1n/YKI5XVRmRfvXGk63rwulhcCePNy8rf4MZ5XUHdXBHk5SaNePXwcoXa2UEKiVw6gkg+DFY2isGKqH3JffqsxmhCbnGl2UjX+cIKZF8tR/bVClTVmJp9vY+7M4oralDdxFISXDqCqH0wWNkoBisiqmcy1c/rKkfWDSNe2VcrcP5KOUqqai0+1vBeXhgc0BV+Ghfouqqh06jh19UF3dyczVajJ6LWYbCyUQxWRGSp4opq/Cs5C2/tON3qYzg7KuGncWkQuOr/9NOo4eHi2K7hS+5RRKLW4HILRER2rqurM4b29LKodtbwHnB2cECevhK5+irkFVficpkB1bUmab5XU7qoHOvCV1c1dJq6sOXX1QX+XdVSIHNxcrDKZ+IkfOroOGLVzjhiRUQtYTQJjFq965ZLRzQ2x6q61oRLJVXILa5Enr4KufpK5BVXIU9fiYvX/iyuqLGoD09XJ/hp1NB1rQta9X/309SFL63G5ZZXN3ISPtkzjlgREXUADkoFlkcPxDOf/QwFGl86Ynn0wEZPpTk7KhHo5YpAL9cmj19RXYs8fRXyiuuCV25xpfT3vGsjX+XVRhRV1KCoogbH80oaPY5CUbeA6s2BS9dVDV1XNXzdVYj7+nij4VBc+ywrvjmOSQO1dnNakKc0qTEcsWpnHLEiotaQ6xSaEAIllbXXglYlcq+NdOUVV+HitZGwfH0Vqo3NX91oqecn9sFdPTzhpnJEF5Uj3FQO1/50tKn7OPKUZufDyes2isGKiFrLVkdITCaBwvJqKXjVnXq8PterPnzd7o+Ns6NSCltuzo5S4JK2qa5vq/u7eZ1Zr
bNjq1fD72inNG31/69sDYOVjWKwIqLOaP+Zy5j10aFb1vX17QKlQoEyQy3KDbUoNxitNhp2M1dnhwZhq0EwuymsuTop8afNGbhaXt3oMe1tXbGONPLW1gGRwcpGMVgRUWd0u5Pwyw21dWGruvba343Xgte1R7VRCmM3hrKyG2rKrtU1txK+tXi7OcPD1QkqRwe4OCnhcu1P6bmTA1ycHKC6cZvjtW2O9fuVDZ6rHOte4+LkABdHBzg5KFq9VEZHGnlrj4DIYGWjGKyIqLOy9v0bW0MIAUOt6aYQZjQLZNK26toGwexiUSVyiirbtMeWUCpwU/iq+7vKyQEuNwWyGwOdk4MCGw9koczQ9CK03dyc8V7MEKidHODkoISzoxJODgo4S3+vf7Q+3FlDewVEBisbxWBFRJ2ZvZ96Sv6tENM/SLll3esPhqCPTxdU1ZpQVWOEof7PGiOqakww1Nb9WVVjRFWtEYYak1RbV3dDTa3R7Bi3ug2SHJyvBSwnR+W1v9eFL2cHJZwcFXXPbwhkddvrXqMyC2n1r1PcEOaUN4U5hXRspVKB+Z//jMJ2ODXL5RaIiMjmTAnxw6SBWrudLD0s2At+GpdbntKcPqxHm32m+lE3Q61JCmrXw5lRCl9m4e1aMDPUGFFVa8KJvBL8eObKLd+rm5sznByUqDGaUG00obq27s+bh2SqjSZUG4G6/2M7BIA8fRUOnbuKEb27tct7MlgREVG7clAq2u1HztpuZ10xa1EoFNIpPaidWnWM5N8KLQpW62bc3eg/K6NJSCGr5tqjurb+T3F9e60Jhmt/1hgFqo1G1FzbX19fIwU2cdPza6+pNaLm2j7DTa8pqqjB5VLDLT9HQWnVLWushcGKiIioBaaE+OHdWXc3OKWptaNTmpaOvA0LbvyWSg5KBdTODlDDOrc6ai1LT836uLu0Qzd1GKyIiIhayN5PadrCyJs13G5AbAu2s4ytHVm/fj2Cg4Ph4uKCIUOG4Mcff5S7JSIiamf1pzQfuNMfI3p3s/kQcrP6kTetxnw0R6txsZulFuoDInA9ENaTKyDyqsAW+vLLLxETE4P169dj5MiR+Oc//4kPP/wQx48fR48ePW75el4VSEREtqQjrLzOdazs2L333ou7774b7777rrRtwIABePDBBxEfH3/L1zNYERERWZ+trLzOOVYtUF1djbS0NLz44otm2yMjI3HgwIFGX2MwGGAwXL9ioaSk8TvDExERUevZytWmnGPVAleuXIHRaISvr6/Zdl9fX+Tn5zf6mvj4eGg0GukRGBjYHq0SERGRDBisWuHmpfuFEE0u5//SSy9Br9dLj5ycnPZokYiIiGTAU4Et4O3tDQcHhwajUwUFBQ1GseqpVCqoVKr2aI+IiIhkxhGrFnB2dsaQIUOwY8cOs+07duxAeHi4TF0RERGRreCIVQstXrwYMTExGDp0KEaMGIH3338f2dnZ+OMf/yh3a0RERCQzBqsWevzxx1FYWIhXX30VeXl5CAkJwdatWxEUFCR3a0RERCQzrmPVzriOFRERkf2x9Pebc6yIiIiIrITBioiIiMhKOMeqndWfeeUK7ERERPaj/nf7VjOoGKzaWWlpKQBwBXYiIiI7VFpaCo1G0+R+Tl5vZyaTCbm5uXB3d29ytfbWKCkpQWBgIHJycux6Ujw/h23h57A9HeWz8HPYFn6OWxNCoLS0FDqdDkpl0zOpOGLVzpRKJQICAtrs+B4eHnb9P4p6/By2hZ/D9nSUz8LPYVv4OZrX3EhVPU5eJyIiIrISBisiIiIiK2Gw6iBUKhWWL19u9zd85uewLfwctqejfBZ+DtvCz2E9nLxOREREZCUcsSIiIiKyEgYrIiIiIithsCIiIiKyEgYrIiIiIithsLJz+/btQ3R0NHQ6HRQKBbZs2SJ3S60SHx+Pe+65B+7u7vDx8cGDDz6IU6dOyd1Wi7377rsYPHiwtDjdiBEjsG3bNrnbum3x8fFQKBRYtGiR3K20S
FxcHBQKhdlDq9XK3VarXLx4EbNmzUK3bt3g6uqKO++8E2lpaXK31SI9e/Zs8M9DoVBg/vz5crfWIrW1tfh//+//ITg4GGq1Gr169cKrr74Kk8kkd2stVlpaikWLFiEoKAhqtRrh4eFITU2Vu61butVvnxACcXFx0Ol0UKvVGDt2LI4dO9YuvTFY2bny8nKEhYVh3bp1crdyW/bu3Yv58+cjJSUFO3bsQG1tLSIjI1FeXi53ay0SEBCAVatW4fDhwzh8+DDGjx+PBx54oN3+B90WUlNT8f7772Pw4MFyt9IqgwYNQl5envTIyMiQu6UWKyoqwsiRI+Hk5IRt27bh+PHjeOutt9C1a1e5W2uR1NRUs38WO3bsAAA8+uijMnfWMqtXr8Z7772HdevW4cSJE1izZg3eeOMNvP3223K31mJPP/00duzYgX/961/IyMhAZGQkJk6ciIsXL8rdWrNu9du3Zs0arF27FuvWrUNqaiq0Wi0mTZok3a+3TQnqMACIxMREuduwioKCAgFA7N27V+5Wbpunp6f48MMP5W6jVUpLS0WfPn3Ejh07REREhHjuuefkbqlFli9fLsLCwuRu47YtW7ZMjBo1Su42rO65554TvXv3FiaTSe5WWmTq1Kli7ty5ZtsefvhhMWvWLJk6ap2Kigrh4OAgvv32W7PtYWFh4pVXXpGpq5a7+bfPZDIJrVYrVq1aJW2rqqoSGo1GvPfee23eD0esyCbp9XoAgJeXl8ydtJ7RaERCQgLKy8sxYsQIudtplfnz52Pq1KmYOHGi3K202pkzZ6DT6RAcHIzf//73OHv2rNwttdjXX3+NoUOH4tFHH4WPjw/uuusufPDBB3K3dVuqq6vx2WefYe7cuVa9IX17GDVqFHbu3InTp08DAH755Rfs378f999/v8ydtUxtbS2MRiNcXFzMtqvVauzfv1+mrm7fuXPnkJ+fj8jISGmbSqVCREQEDhw40Obvz5swk80RQmDx4sUYNWoUQkJC5G6nxTIyMjBixAhUVVWhS5cuSExMxMCBA+Vuq8USEhLw888/28V8i6bce++9+PTTT9G3b19cunQJr7/+OsLDw3Hs2DF069ZN7vYsdvbsWbz77rtYvHgxXn75ZRw6dAgLFy6ESqXCE088IXd7rbJlyxYUFxdjzpw5crfSYsuWLYNer0f//v3h4OAAo9GIv/71r5g+fbrcrbWIu7s7RowYgddeew0DBgyAr68vNm3ahIMHD6JPnz5yt9dq+fn5AABfX1+z7b6+vsjKymrz92ewIpsTGxuLo0eP2u1/MfXr1w/p6ekoLi7G5s2bMXv2bOzdu9euwlVOTg6ee+45bN++vcF/zdqT++67T/p7aGgoRowYgd69e2Pjxo1YvHixjJ21jMlkwtChQ7Fy5UoAwF133YVjx47h3Xfftdtg9dFHH+G+++6DTqeTu5UW+/LLL/HZZ5/hiy++wKBBg5Ceno5FixZBp9Nh9uzZcrfXIv/6178wd+5c+Pv7w8HBAXfffTdmzJiBn3/+We7WbtvNI6FCiHYZHWWwIpuyYMECfP3119i3bx8CAgLkbqdVnJ2dcccddwAAhg4ditTUVPz973/HP//5T5k7s1xaWhoKCgowZMgQaZvRaMS+ffuwbt06GAwGODg4yNhh67i5uSE0NBRnzpyRu5UW8fPzaxDMBwwYgM2bN8vU0e3JysrCDz/8gK+++kruVlrlhRdewIsvvojf//73AOpCe1ZWFuLj4+0uWPXu3Rt79+5FeXk5SkpK4Ofnh8cffxzBwcFyt9Zq9Vf+5ufnw8/PT9peUFDQYBSrLXCOFdkEIQRiY2Px1VdfYdeuXXb9P+qbCSFgMBjkbqNFJkyYgIyMDKSnp0uPoUOHYubMmUhPT7fLUAUABoMBJ06cMPuXrT0YOXJkg+VHTp8+jaCgIJk6uj0bNmyAj48Ppk6dKncrrVJRUQGl0vzn08HBwS6XW6jn5uYGPz8/FBUV4fvvv8cDDzwgd
0utFhwcDK1WK111CtTN6du7dy/Cw8Pb/P05YmXnysrK8Ouvv0rPz507h/T0dHh5eaFHjx4ydtYy8+fPxxdffIH//ve/cHd3l86RazQaqNVqmbuz3Msvv4z77rsPgYGBKC0tRUJCAvbs2YOkpCS5W2sRd3f3BvPb3Nzc0K1bN7ua97Z06VJER0ejR48eKCgowOuvv46SkhK7G1V4/vnnER4ejpUrV+Kxxx7DoUOH8P777+P999+Xu7UWM5lM2LBhA2bPng1HR/v8CYqOjsZf//pX9OjRA4MGDcKRI0ewdu1azJ07V+7WWuz777+HEAL9+vXDr7/+ihdeeAH9+vXDk08+KXdrzbrVb9+iRYuwcuVK9OnTB3369MHKlSvh6uqKGTNmtH1zbX7dIbWp3bt3CwANHrNnz5a7tRZp7DMAEBs2bJC7tRaZO3euCAoKEs7OzqJ79+5iwoQJYvv27XK3ZRX2uNzC448/Lvz8/ISTk5PQ6XTi4YcfFseOHZO7rVb55ptvREhIiFCpVKJ///7i/fffl7ulVvn+++8FAHHq1Cm5W2m1kpIS8dxzz4kePXoIFxcX0atXL/HKK68Ig8Egd2st9uWXX4pevXoJZ2dnodVqxfz580VxcbHcbd3SrX77TCaTWL58udBqtUKlUokxY8aIjIyMdulNIYQQbR/fiIiIiDo+zrEiIiIishIGKyIiIiIrYbAiIiIishIGKyIiIiIrYbAiIiIishIGKyIiIiIrYbAiIiIishIGKyIiIiIrYbAiIptz/vx5KBQKpKeny92K5OTJkxg+fDhcXFxw55133taxFAoFtmzZYpW+bMGuXbvQv3//Ft0r79tvv8Vdd91l1/fXI2oMgxURNTBnzhwoFAqsWrXKbPuWLVugUChk6kpey5cvh5ubG06dOoWdO3c2WZefn48FCxagV69eUKlUCAwMRHR0dLOvuR179uyBQqFAcXFxmxzfEn/605/wyiuvSDcm/uSTT9C1a1ezmhMnTiAgIAAPP/wwDAYDoqKioFAo8MUXX8jQMVHbYbAioka5uLhg9erVKCoqkrsVq6murm71a3/77TeMGjUKQUFB6NatW6M158+fx5AhQ7Br1y6sWbMGGRkZSEpKwrhx4zB//vxWv3d7EEKgtra2xa87cOAAzpw5g0cffbTJmtTUVIwePRqTJ0/Gv//9b6hUKgDAk08+ibfffrvVPRPZIgYrImrUxIkTodVqER8f32RNXFxcg9Nif/vb39CzZ0/p+Zw5c/Dggw9i5cqV8PX1RdeuXbFixQrU1tbihRdegJeXFwICAvDxxx83OP7JkycRHh4OFxcXDBo0CHv27DHbf/z4cdx///3o0qULfH19ERMTgytXrkj7x44di9jYWCxevBje3t6YNGlSo5/DZDLh1VdfRUBAAFQqFe68804kJSVJ+xUKBdLS0vDqq69CoVAgLi6u0eM8++yzUCgUOHToEB555BH07dsXgwYNwuLFi5GSktLoaxobcUpPT4dCocD58+cBAFlZWYiOjoanpyfc3NwwaNAgbN26FefPn8e4ceMAAJ6enlAoFJgzZw6AuqC0Zs0a9OrVC2q1GmFhYfjPf/7T4H2///57DB06FCqVCj/++CN++eUXjBs3Du7u7vDw8MCQIUNw+PDhRnsHgISEBERGRsLFxaXR/bt27cL48ePx5JNP4qOPPoKDg4O0b9q0aTh06BDOnj3b5PGJ7A2DFRE1ysHBAStXrsTbb7+NCxcu3Naxdu3ahdzcXOzbtw9r165FXFwcoqKi4OnpiYMHD+KPf/wj/vjHPyInJ8fsdS+88AKWLFmCI0eOIDw8HNOmTUNhYSEAIC8vDxEREbjzzjtx+PBhJCUl4dKlS3jsscfMjrFx40Y4Ojrip59+wj//+c9G+/v73/+Ot956C2+++SaOHj2KyZMnY9q0aThz5oz0XoMGDcKSJUuQl5eHpUuXNjjG1atXkZSUhPnz58PNza3B/ptPjbXE/PnzYTAYs
G/fPmRkZGD16tXo0qULAgMDsXnzZgDAqVOnkJeXh7///e8AgP/3//4fNmzYgHfffRfHjh3D888/j1mzZmHv3r1mx/7Tn/6E+Ph4nDhxAoMHD8bMmTMREBCA1NRUpKWl4cUXX4STk1OTve3btw9Dhw5tdF9iYiKmTp2KV155BW+88UaD/UFBQfDx8cGPP/7Y2q+GyPYIIqKbzJ49WzzwwANCCCGGDx8u5s6dK4QQIjExUdz4r43ly5eLsLAws9f+7//+rwgKCjI7VlBQkDAajdK2fv36idGjR0vPa2trhZubm9i0aZMQQohz584JAGLVqlVSTU1NjQgICBCrV68WQgjx5z//WURGRpq9d05OjgAgTp06JYQQIiIiQtx55523/Lw6nU789a9/Ndt2zz33iGeffVZ6HhYWJpYvX97kMQ4ePCgAiK+++uqW7wdAJCYmCiGE2L17twAgioqKpP1HjhwRAMS5c+eEEEKEhoaKuLi4Ro/V2OvLysqEi4uLOHDggFntU089JaZPn272ui1btpjVuLu7i08++eSWn6GeRqMRn376qdm2DRs2CAcHB+Hg4CD+/Oc/N/v6u+66q8nPRmSPOGJFRM1avXo1Nm7ciOPHj7f6GIMGDZImNgOAr68vQkNDpecODg7o1q0bCgoKzF43YsQI6e+Ojo4YOnQoTpw4AQBIS0vD7t270aVLF+nRv39/AHXzoeo1NZpSr6SkBLm5uRg5cqTZ9pEjR0rvZQkhBAC0yeT+hQsX4vXXX8fIkSOxfPlyHD16tNn648ePo6qqCpMmTTL7fj799FOz7wZo+P0sXrwYTz/9NCZOnIhVq1Y1qL9ZZWVlo6cB1Wo1Jk2ahA8++KDZ71GtVqOioqLZ9yCyJwxWRNSsMWPGYPLkyXj55Zcb7FMqlVKgqFdTU9Og7uZTSQqFotFtllx6Xx9cTCYToqOjkZ6ebvY4c+YMxowZI9U3dlquuePWE0K0KCT16dMHCoWiRWEMgBQ4b/web/4On376aZw9exYxMTHIyMjA0KFDm530Xf89fvfdd2bfzfHjx83mWQENv5+4uDgcO3YMU6dOxa5duzBw4EAkJiY2+V7e3t6NXuDg4OCALVu2YMiQIRg3blyTwfzq1avo3r17k8cnsjcMVkR0S6tWrcI333yDAwcOmG3v3r078vPzzUKBNdeeunHCd21tLdLS0qRRqbvvvhvHjh1Dz549cccdd5g9LA1TAODh4QGdTof9+/ebbT9w4AAGDBhg8XG8vLwwefJkvPPOOygvL2+wv6nlEOpDRV5enrStse8wMDAQf/zjH/HVV19hyZIl+OCDDwAAzs7OAACj0SjVDhw4ECqVCtnZ2Q2+m8DAwFt+lr59++L555/H9u3b8fDDD2PDhg1N1t51111NhiaVSoWvvvoKw4YNw7hx45CZmWm2v6qqCr/99hvuuuuuW/ZEZC8YrIjolkJDQzFz5swGoyRjx47F5cuXsWbNGvz222945513sG3bNqu97zvvvIPExEScPHkS8+fPR1FREebOnQugbkL31atXMX36dOnKsu3bt2Pu3LlmIcMSL7zwAlavXo0vv/wSp06dwosvvoj09HQ899xzLTrO+vXrYTQaMWzYMGzevBlnzpzBiRMn8I9//MPstOaN6sNOXFwcTp8+je+++w5vvfWWWc2iRYvw/fff49y5c/j555+xa9cuKfQFBQVBoVDg22+/xeXLl1FWVgZ3d3csXboUzz//PDZu3IjffvsNR44cwTvvvIONGzc22X9lZSViY2OxZ88eZGVl4aeffkJqamqzAXPy5MkNQumNnJ2dsXnzZoSHh2P8+PHIyMiQ9qWkpEClUjX53RDZIwYrIrLIa6+91uC034ABA7B+/Xq88847CAsLw6FDhxq9Yq61Vq1ahdWrVyMsLAw//vgj/vvf/8Lb2xsAoNPp8NNPP8FoNGLy5MkICQnBc889B41GYzafyxILFy7EkiVLsGTJEoSGhiIpKQlff/01+
vTp06LjBAcH4+eff8a4ceOwZMkShISEYNKkSdi5cyfefffdRl/j5OSETZs24eTJkwgLC8Pq1avx+uuvm9UYjUbMnz8fAwYMwJQpU9CvXz+sX78eAODv748VK1bgxRdfhK+vL2JjYwHU/fP6y1/+gvj4eAwYMACTJ0/GN998g+Dg4Cb7d3BwQGFhIZ544gn07dsXjz32GO677z6sWLGiydfMmjULx48fx6lTp5qscXJywv/93/9hzJgxGD9+vDRHbNOmTZg5cyZcXV2bfC2RvVGIm/9NSURE1AJ/+tOfoNfrm1zOojGXL19G//79cfjw4WbDHpG94YgVERHdlldeeQVBQUEtOgV77tw5rF+/nqGKOhyOWBERERFZCUesiIiIiKyEwYqIiIjIShisiIiIiKyEwYqIiIjIShisiIiIiKyEwYqIiIjIShisiIiIiKyEwYqIiIjIShisiIiIiKzk/wOavSIcCGPy+wAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1412: FutureWarning: The default value of `n_init` will change from 10 to 'auto' in 1.4. Set the value of `n_init` explicitly to suppress the warning\n", + " super()._check_params_vs_input(X, default_n_init=10)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\sklearn\\cluster\\_kmeans.py:1436: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1.\n", + " warnings.warn(\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n", + "C:\\Users\\b8008458\\AppData\\Local\\Temp\\ipykernel_37268\\3225741462.py:1770: UserWarning: Geometry is in a geographic CRS. Results from 'centroid' are likely incorrect. Use 'GeoSeries.to_crs()' to re-project geometries to a projected CRS before this operation.\n", + "\n", + " centroid = scored_neighbourhoods.geometry.centroid.iloc[0]\n", + "C:\\Users\\b8008458\\AppData\\Local\\Temp\\ipykernel_37268\\3225741462.py:1818: ShapelyDeprecationWarning: The 'type' attribute is deprecated, and will be removed in the future. 
You can use the 'geom_type' attribute instead.\n", + " if row.geometry.type == 'Point':\n", + "C:\\Users\\b8008458\\AppData\\Local\\Temp\\ipykernel_37268\\3225741462.py:1821: ShapelyDeprecationWarning: The 'type' attribute is deprecated, and will be removed in the future. You can use the 'geom_type' attribute instead.\n", + " elif row.geometry.type == 'MultiLineString' or row.geometry.type == 'LineString':\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a 
DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n", + "c:\\Users\\b8008458\\Anaconda3\\envs\\ox_151\\Lib\\site-packages\\geopandas\\geodataframe.py:1538: SettingWithCopyWarning: \n", + "A value is trying to be set on a copy of a slice from a DataFrame.\n", + "Try using .loc[row_indexer,col_indexer] = value instead\n", + "\n", + "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n", + " super().__setitem__(key, value)\n" + ] + }, + { + "ename": "TypeError", + "evalue": "unsupported operand type(s) for /: 'str' and 'str'", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[6], line 1881\u001b[0m\n\u001b[0;32m 1877\u001b[0m scored_neighbourhoods\u001b[38;5;241m.\u001b[39mto_file(geopackage_file_path, driver\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGPKG\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 1880\u001b[0m \u001b[38;5;66;03m## export rat runs \u001b[39;00m\n\u001b[1;32m-> 1881\u001b[0m geopackage_file_path \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mC:\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mUsers\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mb8008458\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mOneDrive - Newcastle University\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43m2022 to 
2023\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mPhD\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mltnDetection\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mLTN-Detection\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mdata\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mrat_runs\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;241;43m/\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mrat_runs_\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mplace_name\u001b[49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m.gpkg\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m)\n\u001b[0;32m 1884\u001b[0m \u001b[38;5;66;03m# Send to geopackage \u001b[39;00m\n\u001b[0;32m 1885\u001b[0m geometry_column \u001b[38;5;241m=\u001b[39m rat_run_edges\u001b[38;5;241m.\u001b[39mgeometry\u001b[38;5;241m.\u001b[39mname\n", + "\u001b[1;31mTypeError\u001b[0m: unsupported operand type(s) for /: 'str' and 'str'" + ] + } + ], + "source": [ + "for place in places:\n", + "\n", + " # get boundary\n", + " def set_location_boundary(place):\n", + " \"\"\"\n", + " Sets up the location boundary by geocoding the given place and buffering it.\n", + "\n", + " Parameters:\n", + " place (str): The name or address of the place to geocode.\n", + "\n", + " Returns:\n", + " geopandas.GeoDataFrame: The buffered boundary of the location.\n", + " \"\"\"\n", + " # Set location and get boundary\n", + " boundary = ox.geocode_to_gdf(place)\n", + " boundary = boundary.to_crs('EPSG:27700')\n", + "\n", + " # Buffer boundary to ensure clips include riverlines which may act as borders between geographies\n", + " boundary_buffered = boundary.buffer(50)\n", + "\n", + " return boundary_buffered, boundary\n", + "\n", + " boundary_buffered, boundary = set_location_boundary(place)\n", + "\n", + "\n", + "\n", + "\n", + " \"\"\"\n", + " This code 
retrieves street nodes and edges for walking and driving from OpenStreetMap within our area boundary, and loads the OS Open Roads network dataset.\n", + "\n", + " Functions:\n", + " - get_street_networks: Retrieves street networks for all, walking, and driving modes within the specified boundary.\n", + " \"\"\"\n", + "\n", + " def get_OSM_street_networks(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves street networks for all, walking, and driving modes within the specified boundary.\n", + "\n", + " Parameters:\n", + " - boundary_buffered: A GeoDataFrame representing the boundary of the area of interest.\n", + "\n", + " Returns:\n", + " - all_edges: A GeoDataFrame containing the edges (streets) of the entire street network.\n", + " - all_nodes: A GeoDataFrame containing the nodes (intersections) of the entire street network.\n", + " - walk_edges: A GeoDataFrame containing the edges (streets) of the walking street network.\n", + " - walk_nodes: A GeoDataFrame containing the nodes (intersections) of the walking street network.\n", + " - drive_edges: A GeoDataFrame containing the edges (streets) of the driving street network.\n", + " - drive_nodes: A GeoDataFrame containing the nodes (intersections) of the driving street network.\n", + " - common_nodes_gdf: A GeoDataFrame containing the common nodes between the driving and walking street networks.\n", + " \"\"\"\n", + "\n", + " # Reset boundary_buffered crs for passing to OSM\n", + " boundary_buffered_4326 = boundary_buffered.to_crs('4326')\n", + "\n", + " # Get street networks\n", + " all_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='all', simplify=False)\n", + " walk_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='walk', simplify=True)\n", + " drive_streets = ox.graph_from_polygon(boundary_buffered_4326.geometry.iloc[0], network_type='drive', simplify=False)\n", + "\n", + " all_edges = ox.graph_to_gdfs(all_streets, nodes=False, 
edges=True)\n", + " all_nodes = ox.graph_to_gdfs(all_streets, nodes=True, edges=False)\n", + "\n", + " walk_edges = ox.graph_to_gdfs(walk_streets, nodes=False, edges=True)\n", + " walk_nodes = ox.graph_to_gdfs(walk_streets, nodes=True, edges=False)\n", + "\n", + " drive_edges = ox.graph_to_gdfs(drive_streets, nodes=False, edges=True)\n", + " drive_nodes = ox.graph_to_gdfs(drive_streets, nodes=True, edges=False)\n", + "\n", + " # Find the common nodes between networks\n", + " # This ensures that shortest paths between points should always be able to be calculated\n", + " common_nodes = drive_nodes.merge(walk_nodes, on='osmid', suffixes=('_drive', '_walk'))\n", + " common_nodes_gdf = gpd.GeoDataFrame(common_nodes, geometry='geometry_drive')\n", + "\n", + " return all_edges, all_nodes, walk_edges, walk_nodes, drive_edges, drive_nodes, common_nodes_gdf, all_streets, walk_streets, drive_streets\n", + "\n", + "\n", + " # get street networks\n", + " all_edges, all_nodes, walk_edges, walk_nodes, drive_edges, drive_nodes, common_nodes_gdf, all_streets, walk_streets, drive_streets = get_OSM_street_networks(boundary_buffered)\n", + " #os_open_roads = get_OS_roads() this is now got at the start of the code to avoid re-reading\n", + "\n", + "\n", + " def retrieve_osm_features(polygon, tags):\n", + " \"\"\"\n", + " Retrieves OpenStreetMap features based on the specified polygon and tags.\n", + "\n", + " Args:\n", + " polygon (Polygon): The polygon to retrieve features within.\n", + " tags (dict): The tags to filter the features.\n", + "\n", + " Returns:\n", + " GeoDataFrame: The retrieved OpenStreetMap features.\n", + " \"\"\"\n", + " try:\n", + " features = ox.features_from_polygon(polygon=polygon, tags=tags)\n", + " except Exception as e:\n", + " error_message = str(e)\n", + " if \"There are no data elements in the server response\" in error_message:\n", + " print(\"No data elements found for the specified location/tags.\")\n", + " features = gpd.GeoDataFrame() # Create an 
empty GeoDataFrame\n", + " else:\n", + " # Handle other exceptions here if needed\n", + " print(\"An error occurred:\", error_message)\n", + " features = None\n", + " return features\n", + "\n", + "\n", + " def get_railways(place):\n", + " \"\"\"\n", + " This retrievies and processes OpenStreetMap (OSM) railways data for a specified place.\n", + "\n", + " Args:\n", + " place (str): The name of the place to retrieve OSM features for.\n", + "\n", + " Returns:\n", + " railways (geopandas.GeoDataFrame): A GeoDataFrame containing the railways within the specified place.\n", + " \"\"\"\n", + "\n", + " # for unknown reasons, using rail = ox.graph_from_place(place, custom_filter='[\"railway\"]')\n", + " # doesn't ALWAYS retrive the full rail network, hence why multiple lines are used to achive the same result\n", + "\n", + " # Define railway types to retrieve\n", + " railway_types = [\"\", \"rail\", \"light_rail\", \"narrow_gauge\", \"subway\", \"tram\"]\n", + "\n", + " # Initialize an empty graph\n", + " combined_railways = nx.MultiDiGraph()\n", + "\n", + " for railway_type in railway_types:\n", + " try:\n", + " # Fetch the railway network for the specified type\n", + " network = ox.graph_from_place(place, simplify=False, custom_filter=f'[\"railway\"~\"{railway_type}\"]')\n", + "\n", + " # Ensure the fetched network is a MultiDiGraph\n", + " if not isinstance(network, nx.MultiDiGraph):\n", + " network = nx.MultiDiGraph(network)\n", + "\n", + " except Exception as e:\n", + " print(f\"No railway data found for '{railway_type}'.\")\n", + " network = nx.MultiDiGraph()\n", + "\n", + " # Compose the networks\n", + " combined_railways = nx.compose(combined_railways, network)\n", + "\n", + " # Convert to GeoDataFrame\n", + " railways = ox.graph_to_gdfs(combined_railways, nodes=False, edges=True)\n", + "\n", + " # Drop any other railway types that aren't needed\n", + " railways = railways.loc[(~railways[\"railway\"].isin([\"tunnel\", \"abandoned\", \"razed\", \"disused\", 
\"funicular\", \"monorail\", \"miniature\"]))]\n", + "\n", + " # Drop rows where any of the specified columns have values \"True\" or \"yes\"\n", + " columns_to_check = ['tunnel', 'abandoned', 'razed', 'disused', 'funicular', 'monorail', 'miniature']\n", + " railways = railways.loc[~railways[railways.columns.intersection(columns_to_check)].isin(['True', 'yes']).any(axis=1)]\n", + "\n", + " # Set railways CRS\n", + " railways = railways.to_crs('EPSG:27700')\n", + "\n", + " return railways\n", + "\n", + "\n", + "\n", + " \n", + " ## get urban footprints from GUF\n", + "\n", + " def get_guf(place):\n", + " \"\"\"\n", + " Retrieves a clipped GeoDataFrame of GUF urban areas within a specified place boundary.\n", + "\n", + " Parameters:\n", + " - place (str): The name or address of the place to retrieve urban areas for.\n", + "\n", + " Returns:\n", + " - gdf_clipped (GeoDataFrame): A GeoDataFrame containing the clipped urban areas within the specified place boundary.\n", + " \"\"\"\n", + "\n", + " # Step 1: Access the WMS Service\n", + " wms_url = 'https://geoservice.dlr.de/eoc/land/wms?GUF04_DLR_v1_Mosaic'\n", + " wms = WebMapService(wms_url, version='1.1.1')\n", + "\n", + " # Step 2: Identify the Layer with ID 102. 
This is the Global Urban Footprint layer GUF\n", + " for layer_name, layer in wms.contents.items():\n", + " if '102' in layer_name:\n", + " print(f\"Layer ID 102 found: {layer_name}\")\n", + "\n", + " # Assuming 'GUF04_DLR_v1_Mosaic' is the layer with ID 102\n", + " layer = 'GUF04_DLR_v1_Mosaic' # Replace with the actual layer name if different\n", + "\n", + " # Step 3: Get the polygon boundary using osmnx\n", + " boundary_gdf = ox.geocode_to_gdf(place)\n", + " boundary = boundary_gdf.to_crs('EPSG:27700')\n", + " # buffer boundary to ensure clips include riverlines which may act as borders between geographies\n", + " boundary_buffered = boundary.buffer(100)\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:4326')\n", + " boundary_polygon = boundary_gdf.geometry[0]\n", + " wms_boundary = boundary_buffered.geometry[0]\n", + "\n", + " # Convert the polygon to a bounding box\n", + " minx, miny, maxx, maxy = wms_boundary.bounds\n", + "\n", + " # Step 4: Request the data from WMS using the bounding box\n", + " width = 1024\n", + " height = 1024\n", + " response = wms.getmap(\n", + " layers=[layer],\n", + " srs='EPSG:4326',\n", + " bbox=(minx, miny, maxx, maxy),\n", + " size=(width, height),\n", + " format='image/geotiff'\n", + " )\n", + "\n", + " # Step 5: Load the Raster Data into Rasterio\n", + " with MemoryFile(response.read()) as memfile:\n", + " with memfile.open() as src:\n", + " image = src.read(1) # Read the first band\n", + " transform = src.transform\n", + " crs = src.crs\n", + "\n", + " # Clip the raster data to the polygon\n", + " out_image, out_transform = rio_mask(src, [mapping(wms_boundary)], crop=True) # Use renamed mask function\n", + " out_meta = src.meta.copy()\n", + " out_meta.update({\"driver\": \"GTiff\",\n", + " \"height\": out_image.shape[1],\n", + " \"width\": out_image.shape[2],\n", + " \"transform\": out_transform,\n", + " \"crs\": crs})\n", + "\n", + " # Step 6: Convert Raster to Vector\n", + " mask_arr = (out_image[0] != 
0).astype(np.uint8) # Assuming non-zero values are urban areas\n", + "\n", + " shapes_gen = shapes(mask_arr, mask=mask_arr, transform=out_transform)\n", + "\n", + " polygons = []\n", + " for geom, value in shapes_gen:\n", + " polygons.append(shape(geom))\n", + "\n", + " # Create a GeoDataFrame from the polygons\n", + " gdf = gpd.GeoDataFrame({'geometry': polygons}, crs=crs)\n", + "\n", + " # Step 7: Create Buffers Around Urban Areas\n", + " buffer_distance = 100 # Buffer distance in meters (adjust as needed)\n", + " gdf_buffered = gdf.copy()\n", + " gdf_buffered['geometry'] = gdf['geometry'].buffer(buffer_distance)\n", + "\n", + " # Step 8: Clip the GeoDataFrame to the boundary of the place\n", + " gdf_clipped = gpd.clip(gdf, boundary_gdf)\n", + "\n", + " return gdf_clipped\n", + "\n", + "\n", + " guf = get_guf(place)\n", + "\n", + "\n", + "\n", + "\n", + " ## get residential areas\n", + "\n", + " def get_residential_areas(polygon):\n", + " polygon = polygon.to_crs('EPSG:4326')\n", + " # Retrieve features from OpenStreetMap\n", + " features = ox.features_from_polygon(polygon.iloc[0], tags={'landuse': 'residential'})\n", + " \n", + " # Convert features to a GeoDataFrame\n", + " gdf = gpd.GeoDataFrame.from_features(features)\n", + " gdf = gdf.set_crs('EPSG:4326')\n", + " \n", + " return gdf\n", + "\n", + "\n", + " residential_areas = get_residential_areas(boundary_buffered)\n", + " \n", + " \n", + "\n", + " ## join urban foot prints and residential areas\n", + " # this is to create a single polygon of where neighbourhoods can be found within\n", + "\n", + " def join_geodataframes(gdf1, gdf2):\n", + " # Ensure both GeoDataFrames have the exact same CRS\n", + " target_crs = 'EPSG:4326' # WGS 84\n", + " gdf1 = gdf1.to_crs(target_crs)\n", + " gdf2 = gdf2.to_crs(target_crs)\n", + " \n", + " # Concatenate GeoDataFrames\n", + " joined_gdf = pd.concat([gdf1, gdf2], ignore_index=True)\n", + " \n", + " return gpd.GeoDataFrame(joined_gdf, crs=target_crs)\n", + "\n", + "\n", + " 
guf_residential_gdf = join_geodataframes(guf, residential_areas)\n", + "\n", + "\n", + " ## create a small buffer to ensure all areas a captured correctly\n", + "\n", + " def buffer_geometries_in_meters(gdf, distance):\n", + " # Define the World Mercator projected CRS\n", + " projected_crs = 'EPSG:3395' # World Mercator\n", + "\n", + " # Project to the new CRS\n", + " gdf_projected = gdf.to_crs(projected_crs)\n", + " \n", + " # Buffer the geometries\n", + " gdf_projected['geometry'] = gdf_projected['geometry'].buffer(distance)\n", + " \n", + " # Reproject back to the original CRS\n", + " gdf_buffered = gdf_projected.to_crs(gdf.crs)\n", + " \n", + " return gdf_buffered\n", + "\n", + "\n", + " guf_residential_gdf = buffer_geometries_in_meters(guf_residential_gdf, 100) # Buffer by 100 meters\n", + "\n", + "\n", + " ## union into one gdf\n", + "\n", + " def unary_union_polygons(gdf):\n", + " # Combine all geometries into a single geometry\n", + " unified_geometry = unary_union(gdf['geometry'])\n", + " \n", + " # Create a new GeoDataFrame with a single row containing the unified geometry\n", + " combined_gdf = gpd.GeoDataFrame({'geometry': [unified_geometry]}, crs=gdf.crs)\n", + " \n", + " return combined_gdf\n", + "\n", + "\n", + " guf_residential_gdf = unary_union_polygons(guf_residential_gdf)\n", + "\n", + " # set to BNG\n", + " guf_residential_gdf = guf_residential_gdf.to_crs(\"27700\")\n", + "\n", + " # Function to remove holes from neighbourhoods\n", + " def remove_holes(polygon):\n", + " if polygon.geom_type == 'Polygon':\n", + " return Polygon(polygon.exterior)\n", + " else:\n", + " return polygon\n", + "\n", + " # remove holes from urban footprint\n", + " guf_residential_gdf['geometry'] = guf_residential_gdf['geometry'].apply(remove_holes)\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " def get_rivers(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves river features within a given boundary.\n", + "\n", + " Args:\n", + " boundary_buffered 
(GeoDataFrame): A GeoDataFrame representing the buffered boundary.\n", + "\n", + " Returns:\n", + " GeoDataFrame: A GeoDataFrame containing the river features within the boundary.\n", + " \"\"\"\n", + " # Ensure the boundary is in the correct CRS for the query\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:4326')\n", + "\n", + " # Check the content of boundary_buffered to ensure it's not empty and correctly transformed\n", + " if boundary_buffered.empty:\n", + " raise ValueError(\"The provided boundary is empty.\")\n", + "\n", + " # Define the tags for waterways\n", + " tags = {\"waterway\": [\"river\", \"rapids\"]}\n", + "\n", + " try:\n", + " # Fetch features from OSM using the boundary geometry\n", + " polygon = boundary_buffered.geometry.iloc[0]\n", + " rivers = ox.features_from_polygon(polygon=polygon, tags=tags)\n", + "\n", + " # Dropping rows where 'tunnel' is equal to 'culvert'\n", + " if 'tunnel' in rivers.columns:\n", + " rivers = rivers[rivers['tunnel'] != 'culvert']\n", + "\n", + " # Convert the CRS back to the desired one\n", + " rivers = rivers.to_crs('EPSG:27700')\n", + "\n", + " # Set the geometry column explicitly\n", + " rivers = rivers.set_geometry('geometry')\n", + "\n", + " return rivers\n", + "\n", + " except InsufficientResponseError:\n", + " print(\"No data elements found for the given boundary and tags.\")\n", + " empty_geometry = {'geometry': [LineString()]}\n", + " rivers = gpd.GeoDataFrame(empty_geometry, crs='EPSG:27700')\n", + " return rivers # Return an empty GeoDataFrame if no data found\n", + "\n", + "\n", + "\n", + " def get_landuse(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves the landuse features based on the specified boundary.\n", + "\n", + " Args:\n", + " boundary_buffered (GeoDataFrame): The buffered boundary polygon.\n", + "\n", + " Returns:\n", + " GeoDataFrame: The landuse features.\n", + " \"\"\"\n", + " # reset boundary crs to allow for features to be found\n", + " boundary_buffered = 
boundary_buffered.to_crs('EPSG:4326')\n", + " # Define tags\n", + " tags = {\"landuse\": [\"industrial\", \"railway\", \"brownfield\", \"commercial\", \"farmland\", \"meadow\"]}\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " landuse = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)\n", + " # set/reset crs\n", + " landuse = landuse.to_crs('27700')\n", + "\n", + " ## get unsuitable \"nature\" types\n", + " # Define tags\n", + " tags = {\"natural\": [\"wood\", \"water\", \"scrub\", \"coastline\", \"beach\"]}\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " nature = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)\n", + " # set/reset crs\n", + " nature = nature.to_crs('27700')\n", + "\n", + " ## get unsuitable \"lesiure\" types. This is mainly for golfcourses\n", + " # Define tags\n", + " tags = {\"leisure\": [\"golf_course\", \"track\", \"park\"]}\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " leisure = ox.features_from_polygon(polygon=boundary_buffered.iloc[0], tags=tags)\n", + " # set/reset crs\n", + " leisure = leisure.to_crs('27700')\n", + " # Define the tags for aeroway\n", + " aeroway_tags = {\"aeroway\": [\"aerodrome\"]}\n", + " # Use the function to retrieve aeroway features\n", + " aeroway = retrieve_osm_features(polygon=boundary_buffered.iloc[0], tags=aeroway_tags)\n", + " # Check if any features were retrieved\n", + " if aeroway is not None:\n", + " if not aeroway.empty:\n", + " # set/reset crs\n", + " aeroway = aeroway.to_crs('27700')\n", + "\n", + " # concat\n", + " landuse = pd.concat([landuse, nature, leisure, aeroway])\n", + "\n", + " ## resest boundary crs\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:27700')\n", + "\n", + " return landuse\n", + "\n", + "\n", + " def get_bus_routes(boundary_buffered):\n", + " \"\"\"\n", + " Retrieves bus routes from OSM/NAPTAN 
within a given boundary.\n", + "\n", + " Args:\n", + " boundary_buffered (GeoDataFrame): A GeoDataFrame representing the boundary.\n", + "\n", + " Returns:\n", + " bus_routes (GeoDataFrame): A GeoDataFrame containing the bus routes.\n", + "\n", + " Raises:\n", + " Exception: If there is an error fetching the data from the Overpass API.\n", + " \"\"\"\n", + " # reset boundary crs to allow for features to be found\n", + " boundary_buffered = boundary_buffered.to_crs('EPSG:4326')\n", + "\n", + " # Calculate the bounding box for XML query\n", + " bounding_box = boundary_buffered.bounds\n", + "\n", + " # Extract the minimum and maximum coordinates\n", + " minx = bounding_box['minx'].min()\n", + " miny = bounding_box['miny'].min()\n", + " maxx = bounding_box['maxx'].max()\n", + " maxy = bounding_box['maxy'].max()\n", + "\n", + " # Create a list of four elements representing the bounding box\n", + " bbox = [minx, miny, maxx, maxy]\n", + "\n", + " # reset boundary_buffer crs\n", + " boundary_buffered = boundary_buffered.to_crs('27700')\n", + "\n", + " # Define the Overpass API endpoint\n", + " overpass_url = \"https://overpass-api.de/api/interpreter\"\n", + "\n", + " # Define the XML query\n", + " xml_query = f\"\"\"\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + " \"\"\"\n", + "\n", + " # Initialize lists to store data\n", + " geometries = []\n", + " element_data = []\n", + "\n", + " # Make the Overpass API request\n", + " response = requests.post(overpass_url, data=xml_query)\n", + "\n", + " # Check if the request was successful\n", + " if response.status_code == 200:\n", + " data = response.json()\n", + "\n", + " # Access the data from the response\n", + " for element in data.get(\"elements\", []):\n", + " if element.get('type') == 'way' and 'geometry' in element:\n", + " # Extract geometry coordinates from 'geometry' field\n", + " 
coordinates = [(node['lon'], node['lat']) for node in element['geometry']]\n", + " # Create a LineString geometry\n", + " line = LineString(coordinates)\n", + " geometries.append(line)\n", + " element_data.append(element)\n", + "\n", + " # Create a GeoDataFrame\n", + " bus_routes = gpd.GeoDataFrame(element_data, geometry=geometries)\n", + "\n", + " # Set CRS\n", + " bus_routes = bus_routes.set_crs('4326')\n", + " bus_routes = bus_routes.to_crs('27700')\n", + "\n", + " return bus_routes\n", + "\n", + " else:\n", + " raise Exception(f\"Error fetching data: {response.status_code} - {response.text}\")\n", + "\n", + "\n", + "\n", + " def clip_boundaries(os_open_roads, rivers, railways, landuse, bus_routes, boundary_buffered):\n", + " \"\"\"\n", + " Clips the geospatial data to the boundary_buffered extent.\n", + "\n", + " Parameters:\n", + " - os_open_roads (GeoDataFrame): lines from OS Open roads.\n", + " - rivers (GeoDataFrame): lines of Rivers.\n", + " - railways (GeoDataFrame): lines of Railways.\n", + " - landuse (GeoDataFrame): Land use polygons.\n", + " - bus_routes (GeoDataFrame): lines of bus routes.\n", + " - boundary_buffered (GeoDataFrame): buffered boundary.\n", + "\n", + " Returns:\n", + " - clipped versions of input geodataframes, aside from the bufferd boundary.\n", + " \"\"\"\n", + " os_open_roads_clip = gpd.clip(os_open_roads, boundary_buffered)\n", + " rivers_clip = gpd.clip(rivers, boundary_buffered)\n", + " railways_clip = gpd.clip(railways, boundary_buffered)\n", + " landuse_clip = gpd.clip(landuse, boundary_buffered)\n", + " bus_routes_clip = gpd.clip(bus_routes, boundary_buffered)\n", + "\n", + " return os_open_roads_clip, rivers_clip, railways_clip, landuse_clip, bus_routes_clip\n", + "\n", + "\n", + " def process_bus_routes(bus_routes_clip, buffer_distance):\n", + " \"\"\"\n", + " Count the number of bus routes per road and remove roads with more than one bus route on them.\n", + " \n", + " Args:\n", + " bus_routes_clip (GeoDataFrame): The 
input GeoDataFrame containing bus routes.\n", + " buffer_distance (float): The buffer distance to convert roads to polygons, set in meters.\n", + " \n", + " Returns:\n", + " GeoDataFrame: The filtered GeoDataFrame containing roads with two or more bus routes on them.\n", + " \"\"\"\n", + " # Create a new GeoDataFrame with the buffered geometries\n", + " bus_routes_buffered = bus_routes_clip.copy() # Copy the original GeoDataFrame\n", + " bus_routes_buffered['geometry'] = bus_routes_buffered['geometry'].buffer(buffer_distance)\n", + "\n", + " # count the number of overlapping bus routes\n", + " def count_overlapping_features(gdf):\n", + " \"\"\"\n", + " Count the number of overlapping features in a GeoDataFrame.\n", + " \n", + " Args:\n", + " gdf (GeoDataFrame): The input GeoDataFrame.\n", + " \n", + " Returns:\n", + " GeoDataFrame: The input GeoDataFrame with an additional column 'Bus_routes_count' indicating the count of overlapping features.\n", + " \"\"\"\n", + " # Create an empty column to store the count of overlapping features\n", + " gdf['Bus_routes_count'] = 0\n", + "\n", + " # Iterate through each row in the GeoDataFrame\n", + " for idx, row in gdf.iterrows():\n", + " # Get the geometry of the current row\n", + " geometry = row['geometry']\n", + " \n", + " # Use a spatial filter to find overlapping features\n", + " overlaps = gdf[gdf['geometry'].intersects(geometry)]\n", + " \n", + " # Update the Bus_routes_count column with the count of overlapping features\n", + " gdf.at[idx, 'Bus_routes_count'] = len(overlaps)\n", + " \n", + " return gdf\n", + "\n", + " # call function\n", + " bus_routes_buffered_with_count = count_overlapping_features(bus_routes_buffered)\n", + "\n", + " # drop any roads which have less than two bus routes on them\n", + " bus_routes_filtered = bus_routes_buffered_with_count[bus_routes_buffered_with_count['Bus_routes_count'] >= 2]\n", + " \n", + " return bus_routes_filtered\n", + "\n", + "\n", + "\n", + " def 
filter_OS_boundary_roads(os_open_roads_clip):\n", + " \"\"\"\n", + " Filter the `os_open_roads_clip` DataFrame to select boundary roads.\n", + "\n", + " This function filters the `os_open_roads_clip` DataFrame to select roads that are considered \"boundary\" roads. \n", + " The selection criteria include roads that have the following attributes:\n", + " - `primary_route` is True\n", + " - `trunk_road` is True\n", + " - `fictitious` is True\n", + " - `road_classification` is 'A Road' or 'B Road'\n", + " - `road_function` is 'Minor Road' or 'Motorway'\n", + "\n", + " The filtered DataFrame is returned.\n", + "\n", + " Note: The commented line `(os_open_roads_clip['road_function'] == 'Restricted Local Access Road')` is excluded from the selection.\n", + "\n", + " Parameters:\n", + " - os_open_roads_clip (DataFrame): A DataFrame containing road data.\n", + "\n", + " Returns:\n", + " - boundary_roads (DataFrame): A DataFrame containing the filtered boundary roads.\n", + "\n", + " Example usage:\n", + " # Assuming `os_open_roads_clip` is a DataFrame containing road data\n", + " boundary_roads = filter_OS_boundary_roads(os_open_roads_clip)\n", + " \"\"\"\n", + " boundary_roads = os_open_roads_clip.loc[((os_open_roads_clip['primary_route'] == 'True') |\n", + " (os_open_roads_clip['trunk_road'] == 'True') |\n", + " (os_open_roads_clip['fictitious'] == 'True') |\n", + " (os_open_roads_clip['road_classification'] == 'A Road') | \n", + " (os_open_roads_clip['road_classification'] == 'B Road') | \n", + " (os_open_roads_clip['road_function'] == 'Minor Road') |\n", + " (os_open_roads_clip['road_function'] == 'Motorway') |\n", + " (os_open_roads_clip['road_function'] == 'Minor Road') \n", + " )]\n", + " return boundary_roads\n", + "\n", + "\n", + "\n", + " ## buffering and dissolving functions\n", + " \n", + " def buffer_and_dissolve(input_gdf):\n", + " \"\"\"\n", + " Buffer and dissolve a GeoDataFrame.\n", + " \n", + " Args:\n", + " input_gdf (GeoDataFrame): The input 
GeoDataFrame.\n", + " \n", + " Returns:\n", + " GeoDataFrame: The buffered and dissolved GeoDataFrame.\n", + " \"\"\"\n", + " # Buffer around boundaries\n", + " buffered_gdf = input_gdf.copy() # Create a copy to avoid modifying the original\n", + " buffered_gdf['geometry'] = buffered_gdf['geometry'].buffer(5) # set a 5 meter buffer\n", + "\n", + " # Dissolve the geometries\n", + " dissolved_geo = buffered_gdf.unary_union\n", + "\n", + " # Create a new GeoDataFrame with the dissolved geometry\n", + " dissolved_gdf = gpd.GeoDataFrame(geometry=[dissolved_geo])\n", + "\n", + " # Set the CRS (Coordinate Reference System)\n", + " dissolved_gdf.crs = input_gdf.crs\n", + "\n", + " return dissolved_gdf\n", + "\n", + "\n", + " def dissolve_gdf(input_gdf):\n", + " # dissolve geometries\n", + " dissolved_geo = input_gdf.unary_union\n", + " dissolved_gdf = gpd.GeoDataFrame(geometry=[dissolved_geo])\n", + " dissolved_gdf.crs = input_gdf.crs\n", + "\n", + " return dissolved_gdf\n", + "\n", + "\n", + " def erase_boundary_features(boundary, boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd, boundary_landuse_bd, boundary_bus_routes_bd, guf_residential_gdf):\n", + " \"\"\"\n", + " Erases boundary features from the given boundary geometry.\n", + "\n", + " Parameters:\n", + " - boundary: GeoDataFrame representing the boundary geometry\n", + " - boundary_rivers_bd: GeoDataFrame representing the rivers boundary features\n", + " - boundary_roads_bd: GeoDataFrame representing the roads boundary features\n", + " - boundary_rail_bd: GeoDataFrame representing the rail boundary features\n", + " - boundary_landuse_bd: GeoDataFrame representing the landuse boundary features\n", + " - boundary_bus_routes_bd: GeoDataFrame representing the bus routes boundary features\n", + "\n", + " Returns:\n", + " - erased_boundary_gdf: GeoDataFrame containing the result of the \"Erase\" operation\n", + " \"\"\"\n", + "\n", + " # ensure that neighbourhoods fall only within urban footprint areas\n", + " 
boundary = gpd.clip(boundary, guf_residential_gdf)\n", + "\n", + " # Join all boundary features\n", + " boundaries = pd.concat([boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd, boundary_landuse_bd, boundary_bus_routes_bd], ignore_index=True)\n", + " boundary_features = dissolve_gdf(boundaries)\n", + "\n", + " # Use the `difference` method to perform the \"Erase\" operation\n", + " erased_boundary = boundary.difference(boundary_features.unary_union)\n", + "\n", + " # Convert the GeoSeries to a single geometry using unary_union\n", + " erased_boundary = erased_boundary.unary_union\n", + "\n", + " # Create a new GeoDataFrame with the result of \"Erase\" operation\n", + " erased_boundary_gdf = gpd.GeoDataFrame(geometry=[erased_boundary], crs=boundary.crs)\n", + "\n", + " # Explode multipolygon to polygons\n", + " erased_boundary_gdf = erased_boundary_gdf.explode()\n", + "\n", + " return erased_boundary_gdf\n", + "\n", + "\n", + " def drop_large_or_small_areas(neighbourhoods):\n", + " \"\"\"\n", + " Drops rows from the 'neighbourhoods' DataFrame where the area is less than 10,000 square units or greater than 5,000,000 square units.\n", + "\n", + " Parameters:\n", + " - neighbourhoods (DataFrame): The input DataFrame containing neighbourhood data.\n", + "\n", + " Returns:\n", + " - neighbourhoods (DataFrame): The updated DataFrame with small areas dropped.\n", + " \"\"\"\n", + " # Calculate area\n", + " neighbourhoods[\"area\"] = neighbourhoods.geometry.area\n", + "\n", + " # Drop rows where area is less than 10,000 or greater than 5,000,000\n", + " neighbourhoods = neighbourhoods.loc[(neighbourhoods[\"area\"] >= 10000)]\n", + " neighbourhoods = neighbourhoods.loc[(neighbourhoods[\"area\"] <= 5000000)]\n", + "\n", + " return neighbourhoods\n", + "\n", + "\n", + " def filter_neighbourhoods_by_roads(neighbourhoods, os_open_roads_clip, polygon_column_name):\n", + " \"\"\"\n", + " Count the number of roads within each polygon in a GeoDataFrame and filter the 
neighbourhoods based on road count and road density.\n", + " \n", + " Args:\n", + " neighbourhoods (GeoDataFrame): GeoDataFrame containing neighbourhood polygons.\n", + " os_open_roads_clip (GeoDataFrame): GeoDataFrame containing road data.\n", + " polygon_column_name (str): Name of the column in neighbourhoods to use for grouping.\n", + "\n", + " Returns:\n", + " GeoDataFrame: Updated neighbourhoods GeoDataFrame with filtered rows based on road count and road density.\n", + " \"\"\"\n", + " \n", + " def count_roads_within_polygons(polygons_gdf, roads_gdf, polygon_column_name):\n", + " \"\"\"\n", + " Count the number of roads within each polygon in a GeoDataFrame.\n", + " \n", + " Args:\n", + " polygons_gdf (GeoDataFrame): GeoDataFrame containing polygons.\n", + " roads_gdf (GeoDataFrame): GeoDataFrame containing roads.\n", + " polygon_column_name (str): Name of the column in polygons_gdf to use for grouping.\n", + "\n", + " Returns:\n", + " GeoDataFrame: Original polygons GeoDataFrame with a \"road_count\" column added.\n", + " \"\"\"\n", + " \n", + " # spatial join\n", + " joined = gpd.sjoin(polygons_gdf, roads_gdf, how='left', op='intersects')\n", + " \n", + " # Group by the polygon column and count the number of roads in each\n", + " road_counts = joined.groupby(polygon_column_name).size().reset_index(name='road_count')\n", + " \n", + " # Merge the road counts back into the polygons GeoDataFrame\n", + " polygons_gdf = polygons_gdf.merge(road_counts, on=polygon_column_name, how='left')\n", + "\n", + " # Calculate road density (area divided by road_count). 
It is multiplied by 10000 for ease of understanding the numbers involved with this\n", + " polygons_gdf['road_density'] = (polygons_gdf['road_count'] / polygons_gdf['area'] ) * 10000\n", + " \n", + " return polygons_gdf\n", + " \n", + " neighbourhoods = count_roads_within_polygons(neighbourhoods, os_open_roads_clip, polygon_column_name)\n", + "\n", + " # Drop rows with road_density of 0.2 or below, or with fewer than 3 roads\n", + " neighbourhoods = neighbourhoods[(neighbourhoods['road_count'] > 2)]\n", + " neighbourhoods = neighbourhoods[(neighbourhoods['road_density'] > 0.2)]\n", + " \n", + " return neighbourhoods\n", + "\n", + "\n", + " def remove_holes(polygon):\n", + " \"\"\"\n", + " Removes holes from a polygon. Mostly for visual reasons.\n", + "\n", + " Parameters:\n", + " polygon (Polygon): The polygon to remove holes from.\n", + "\n", + " Returns:\n", + " Polygon: The polygon without holes.\n", + " \"\"\"\n", + " if polygon.geom_type == 'Polygon':\n", + " return Polygon(polygon.exterior)\n", + " else:\n", + " return polygon\n", + "\n", + " landuse = get_landuse(boundary_buffered)\n", + " rivers = get_rivers(boundary_buffered)\n", + " railways = get_railways(place)\n", + " landuse = get_landuse(boundary_buffered)\n", + " bus_routes = get_bus_routes(boundary_buffered)\n", + " os_open_roads_clip, rivers_clip, railways_clip, landuse_clip, bus_routes_clip = clip_boundaries(os_open_roads, rivers, railways, landuse, bus_routes, boundary_buffered)\n", + " bus_routes_clip = process_bus_routes(bus_routes_clip, 0.2)\n", + " boundary_roads = filter_OS_boundary_roads(os_open_roads_clip)\n", + "\n", + " ## buffer and dissolve \n", + " boundary_roads_bd = buffer_and_dissolve(boundary_roads)\n", + " boundary_rivers_bd = buffer_and_dissolve(rivers_clip)\n", + " boundary_rail_bd = buffer_and_dissolve(railways_clip)\n", + " boundary_landuse_bd = buffer_and_dissolve(landuse_clip)\n", + " boundary_bus_routes_bd = buffer_and_dissolve(bus_routes_clip)\n", + "\n", + " ## geodataframe 
cleaning\n", + " erased_boundary_gdf = erase_boundary_features(boundary, boundary_rivers_bd, boundary_roads_bd, boundary_rail_bd, boundary_landuse_bd, boundary_bus_routes_bd, guf_residential_gdf)\n", + " neighbourhoods = erased_boundary_gdf\n", + " neighbourhoods = drop_large_or_small_areas(neighbourhoods)\n", + "\n", + " neighbourhoods = filter_neighbourhoods_by_roads(neighbourhoods, os_open_roads_clip, 'geometry')\n", + "\n", + " ## create unique IDs\n", + " # simple number based ID\n", + " neighbourhoods['ID'] = range(1, len(neighbourhoods) + 1)\n", + "\n", + " neighbourhoods['geometry'] = neighbourhoods['geometry'].apply(remove_holes)\n", + "\n", + "\n", + " ## filter neighbourhoods to only locations with more than 1 intersection (1 or fewer intersections indicates that all travel modes will be the same)\n", + " # reset neighbourhoods crs\n", + " neighbourhoods = neighbourhoods.to_crs('4326')\n", + "\n", + " # Spatial join to count points within each neighborhood\n", + " spatial_join = gpd.sjoin(neighbourhoods, common_nodes_gdf, how='left', op='contains')\n", + "\n", + " # Group by 'ID' and count the points within each neighborhood\n", + " point_counts = spatial_join.groupby('ID').size().reset_index(name='point_count')\n", + "\n", + " # Filter out neighborhoods with 1 or 0 points\n", + " filtered_neighbourhood_ids = point_counts[point_counts['point_count'] > 1]['ID']\n", + "\n", + " neighbourhoods= neighbourhoods[neighbourhoods['ID'].isin(filtered_neighbourhood_ids)]\n", + "\n", + "\n", + "\n", + " ## we also need to join the length of the streets within the neighbourhood for further analysis\n", + " # Reset index of neighbourhoods\n", + " neighbourhoods = neighbourhoods.reset_index(drop=True)\n", + "\n", + " # reset neighbourhoods crs\n", + " neighbourhoods = neighbourhoods.to_crs('27700')\n", + "\n", + " # Perform a spatial join\n", + " joined_data = gpd.sjoin(os_open_roads_clip, neighbourhoods, how=\"inner\", op=\"intersects\")\n", + "\n", + " # Group by 
neighborhood and calculate total road length\n", + " road_lengths = joined_data.groupby('index_right')['length'].sum().reset_index()\n", + "\n", + " # Merge road_lengths with neighbourhoods and drop 'index_right' column\n", + " neighbourhoods = neighbourhoods.merge(road_lengths, left_index=True, right_on='index_right', how='left').drop(columns=['index_right'])\n", + "\n", + " # Rename the column\n", + " neighbourhoods.rename(columns={'length': 'road_lengths'}, inplace=True)\n", + "\n", + "\n", + "\n", + " ### find accessiablity\n", + "\n", + " ## all to all\n", + " def calculate_distance_stats_from_points(points_gdf, network):\n", + " all_pairs_shortest_paths = {}\n", + " points_osmids = points_gdf.index.tolist() # Assuming the 'osmid' is the index in the GeoDataFrame\n", + "\n", + " for start_node in points_osmids:\n", + " shortest_paths = {}\n", + " try:\n", + " for end_node in points_osmids:\n", + " if start_node != end_node:\n", + " distance = nx.shortest_path_length(network, start_node, end_node, weight='length')\n", + " shortest_paths[end_node] = distance\n", + " all_pairs_shortest_paths[start_node] = shortest_paths\n", + " except nx.NetworkXNoPath:\n", + " # If no path is found, skip adding to all_pairs_shortest_paths\n", + " continue\n", + "\n", + " distances = [length for paths in all_pairs_shortest_paths.values() for length in paths.values()]\n", + "\n", + " mean_distance = statistics.mean(distances)\n", + " median_distance = statistics.median(distances)\n", + " min_distance = min(distances)\n", + " max_distance = max(distances)\n", + " distance_range = max_distance - min_distance\n", + " total_distance = sum(distances)\n", + "\n", + " return {\n", + " \"mean_distance\": mean_distance,\n", + " \"median_distance\": median_distance,\n", + " \"min_distance\": min_distance,\n", + " \"max_distance\": max_distance,\n", + " \"distance_range\": distance_range,\n", + " \"total_distance\": total_distance\n", + " }\n", + "\n", + " ## processing for all to all \n", + 
" results = []\n", + "\n", + " for index, row in neighbourhoods.iterrows():\n", + " neighbourhood = neighbourhoods.loc[[index]]\n", + "\n", + " ## get neighbourhood boundary and neighbourhood boundary buffer\n", + " # set crs\n", + " neighbourhood = neighbourhood.to_crs('27700')\n", + " # create a buffer neighbourhood\n", + " neighbourhood_buffer = neighbourhood['geometry'].buffer(15)\n", + " # convert back to a geodataframe (for later on)\n", + " neighbourhood_buffer = gpd.GeoDataFrame(geometry=neighbourhood_buffer)\n", + " # reset crs\n", + " neighbourhood, neighbourhood_buffer = neighbourhood.to_crs('4326'), neighbourhood_buffer.to_crs('4326')\n", + "\n", + "\n", + " ## get nodes which can be driven to and walked to within area\n", + " neighbourhood_nodes = gpd.clip(common_nodes_gdf, neighbourhood_buffer)\n", + "\n", + " ## get length of total edges within the neighbourhood\n", + " edges_within_neighbourhood = gpd.sjoin(all_edges, neighbourhood, how=\"inner\", op=\"intersects\")\n", + " total_length = edges_within_neighbourhood['length'].sum()\n", + "\n", + "\n", + " ## calculate neighbourhood distance stats for walking and driving\n", + " walk_stats = calculate_distance_stats_from_points(neighbourhood_nodes, walk_streets)\n", + " drive_stats = calculate_distance_stats_from_points(neighbourhood_nodes, drive_streets)\n", + "\n", + "\n", + " ## Add the statistics to the GeoDataFrame\n", + " neighbourhood['walk_mean_distance'] = walk_stats['mean_distance']\n", + " neighbourhood['walk_median_distance'] = walk_stats['median_distance']\n", + " neighbourhood['walk_min_distance'] = walk_stats['min_distance']\n", + " neighbourhood['walk_max_distance'] = walk_stats['max_distance']\n", + " neighbourhood['walk_distance_range'] = walk_stats['distance_range']\n", + " neighbourhood['walk_total_distance'] = walk_stats['total_distance']\n", + "\n", + " neighbourhood['drive_mean_distance'] = drive_stats['mean_distance']\n", + " neighbourhood['drive_median_distance'] = 
drive_stats['median_distance']\n", + " neighbourhood['drive_min_distance'] = drive_stats['min_distance']\n", + " neighbourhood['drive_max_distance'] = drive_stats['max_distance']\n", + " neighbourhood['drive_distance_range'] = drive_stats['distance_range']\n", + " neighbourhood['drive_total_distance'] = drive_stats['total_distance']\n", + "\n", + " ## Store statistics along with neighborhood ID or other identifying information\n", + " result = {\n", + " 'neighbourhood_id': neighbourhood['ID'].iloc[0], # Assuming you have an ID column\n", + " 'walk_mean_distance': walk_stats['mean_distance'],\n", + " 'walk_median_distance': walk_stats['median_distance'],\n", + " 'walk_total_distance': walk_stats['total_distance'],\n", + " \n", + "\n", + " 'drive_mean_distance': drive_stats['mean_distance'],\n", + " 'drive_median_distance': drive_stats['median_distance'],\n", + " 'drive_total_distance': drive_stats['total_distance'],\n", + "\n", + " 'total_edge_length': total_length\n", + " }\n", + " results.append(result)\n", + "\n", + " ## Convert the results to a new dataframe\n", + " results_df = pd.DataFrame(results)\n", + "\n", + "\n", + " ## calculate differances\n", + "\n", + " results_df['mean_distance_diff'] = results_df['walk_mean_distance'] - results_df['drive_mean_distance']\n", + " results_df['median_distance_diff'] = results_df['walk_median_distance'] - results_df['drive_median_distance']\n", + " results_df['total_distance_diff'] = results_df['walk_total_distance'] - results_df['drive_total_distance']\n", + "\n", + " merged_df = pd.merge(neighbourhoods, results_df, left_on = \"ID\", right_on = \"neighbourhood_id\")\n", + " access_results_gdf = gpd.GeoDataFrame(merged_df, geometry='geometry')\n", + "\n", + "\n", + "\n", + "\n", + " def get_barriers(boundary):\n", + " \"\"\"\n", + " Find modal filters within a given boundary.\n", + "\n", + " Args:\n", + " boundary (geopandas.GeoDataFrame): A GeoDataFrame representing the boundary.\n", + "\n", + " Returns:\n", + " 
barriers (geopandas.GeoDataFrame): A GeoDataFrame containing the modal filters.\n", + " streets_gdf (geopandas.GeoDataFrame): A GeoDataFrame containing the streets from OSM.\n", + "\n", + " \"\"\"\n", + "\n", + " # get the boundary in the correct CRS for OSMnx\n", + " boundary_4326 = boundary.to_crs('EPSG:4326')\n", + "\n", + " # get the most \"basic\" filters mapped, the barriers/bollards etc\n", + " # Define tags\n", + " tags = {\"barrier\": [\"bollard\", \"bus_trap\", \"entrance\", \"planter\", \"sump_buster\", \"wedge\"]}\n", + "\n", + " # Use ox.features_from_polygon to find features matching the specified tags\n", + " barriers = ox.features_from_polygon(polygon=boundary_4326.geometry.iloc[0], tags=tags)\n", + "\n", + " # process any linestrings into point geometries\n", + " # Filter the GeoDataFrame to select only rows with \"linestring\" geometry\n", + " barriers_linestrings = barriers[barriers['geometry'].geom_type == 'LineString']\n", + "\n", + " # Create an empty GeoDataFrame to store the individual points\n", + " points_gdf = gpd.GeoDataFrame(columns=list(barriers_linestrings.columns), crs=barriers_linestrings.crs)\n", + "\n", + " # Iterate through each row in the GeoDataFrame with linestrings\n", + " for idx, row in barriers_linestrings.iterrows():\n", + " if isinstance(row['geometry'], LineString):\n", + " # Extract the individual points from the linestring\n", + " points = [Point(coord) for coord in list(row['geometry'].coords)]\n", + "\n", + " # Create a GeoDataFrame from the individual points and copy the attributes\n", + " points_df = gpd.GeoDataFrame(geometry=points, crs=barriers_linestrings.crs)\n", + " for col in barriers_linestrings.columns:\n", + " if col != 'geometry':\n", + " points_df[col] = row[col]\n", + "\n", + " # Rename the \"geometry\" column to \"merged_geometry\"\n", + " points_df = points_df.rename(columns={'geometry': 'merged_geometry'})\n", + "\n", + " # Append the points to the points_gdf\n", + " points_gdf = 
pd.concat([points_gdf, points_df], ignore_index=True)\n", + "\n", + " # Now, points_gdf contains all the individual points from the linestrings with inherited attributes\n", + "\n", + " # Remove the \"geometry\" column from the points GeoDataFrame\n", + " points_gdf = points_gdf.drop(columns=['geometry'])\n", + "\n", + " # Remove the linestring rows from the original GeoDataFrame\n", + " barriers = barriers[barriers['geometry'].geom_type != 'LineString']\n", + "\n", + " # Rename the \"merged_geometry\" column to \"geometry\" in the points GeoDataFrame\n", + " points_gdf = points_gdf.rename(columns={'merged_geometry': 'geometry'})\n", + "\n", + " # Concatenate the individual points GeoDataFrame to the original GeoDataFrame\n", + " barriers = pd.concat([barriers, points_gdf], ignore_index=True)\n", + "\n", + " # Reset the index to ensure it is continuous\n", + " barriers.reset_index(drop=True, inplace=True)\n", + "\n", + " # Create a new column \"previously_linestring\" and set it to False initially\n", + " barriers['previously_linestring'] = False\n", + "\n", + " # Iterate through each row in the GeoDataFrame with linestrings\n", + " for idx, row in barriers_linestrings.iterrows():\n", + " if isinstance(row['geometry'], LineString):\n", + " # Extract the individual points from the linestring\n", + " points = [Point(coord) for coord in list(row['geometry'].coords)]\n", + "\n", + " # Iterate through the points in the linestring\n", + " for point in points:\n", + " # Check if the point's geometry intersects with any of the original linestrings\n", + " mask = barriers['geometry'].intersects(point)\n", + " if mask.any():\n", + " # If the point intersects with any linestring, set \"previously_linestring\" to True\n", + " barriers.loc[mask, 'previously_linestring'] = True\n", + "\n", + " # add a unique ID\n", + " barriers['barrier_id'] = range(1, len(barriers) + 1)\n", + "\n", + " # Convert the OSMnx graph to a GeoDataFrame of streets\n", + " streets_gdf = 
ox.graph_to_gdfs(all_streets, nodes=False, edges=True)\n", + "\n", + " # join the barriers to the streets\n", + " streets_gdf = gpd.sjoin(streets_gdf, barriers, how=\"left\", op=\"intersects\")\n", + "\n", + " # clean geodataframe and drop streets without a barrier\n", + " streets_gdf.columns = streets_gdf.columns.str.replace(\"_right\", \"_barrier\").str.replace(\"_left\", \"_street\")\n", + " # we need to double check the name of \"barrier\"\n", + " streets_gdf['barrier_barrier'] = streets_gdf['barrier'] if 'barrier' in streets_gdf.columns else streets_gdf[\n", + " 'barrier_barrier']\n", + "\n", + " if 'name_street' in streets_gdf.columns:\n", + " streets_gdf = streets_gdf.rename(columns={'name_street': 'name'})\n", + " barrier_streets = streets_gdf.dropna(subset=['barrier_barrier'])\n", + "\n", + " # add barrier tag\n", + " barrier_streets['filter_type'] = 'barrier or bollard'\n", + "\n", + " ## extract points which are on/within 1m of streets only\n", + " streets_gdf['has_barrier'] = 'yes'\n", + "\n", + " # reset crs before spatial join\n", + " barriers, streets_gdf = barriers.to_crs(3857), streets_gdf.to_crs(3857)\n", + "\n", + " barriers = gpd.sjoin_nearest(barriers, streets_gdf, how=\"left\", max_distance=1)\n", + " barriers = barriers.dropna(subset=['has_barrier'])\n", + " barriers = barriers.reset_index(drop=True) # Reset the index\n", + "\n", + " # Dissolve based on the 'geometry' column\n", + " barriers = barriers.dissolve(by='barrier_id_right')\n", + "\n", + " # add barrier tag\n", + " barriers['filter_type'] = 'barrier or bollard'\n", + "\n", + " # Reset the index to remove multi-index\n", + " barriers.reset_index(drop=True, inplace=True)\n", + "\n", + " return barriers, streets_gdf\n", + "\n", + "\n", + " def get_bus_gates(streets_gdf):\n", + " \"\"\"\n", + " Finds all the bus gates within the given streets GeoDataFrame.\n", + "\n", + " Parameters:\n", + " streets_gdf (GeoDataFrame): A GeoDataFrame containing street data.\n", + "\n", + " Returns:\n", 
+ " busgates (GeoDataFrame): A GeoDataFrame containing the bus gates found in the streets data.\n", + "\n", + " \"\"\"\n", + "\n", + " # we need to double check the name of \"access\"\n", + " streets_gdf['access_street'] = streets_gdf['access'] if 'access' in streets_gdf.columns else streets_gdf['access_street']\n", + " streets_gdf['bicycle_street'] = streets_gdf['bicycle'] if 'bicycle' in streets_gdf.columns else streets_gdf['bicycle_street']\n", + " streets_gdf['bus'] = streets_gdf['bus_street'] if 'bus_street' in streets_gdf.columns else streets_gdf['bus']\n", + "\n", + " busgates = streets_gdf[((streets_gdf[\"bus\"] == \"yes\") & (streets_gdf[\"access_street\"] == \"no\") & (streets_gdf[\"bicycle_street\"] == \"yes\")) |\n", + " (streets_gdf[\"bus\"] == \"yes\") & (streets_gdf[\"motor_vehicle_street\"] == \"no\") & (streets_gdf[\"bicycle_street\"] == \"yes\")\n", + " ]\n", + "\n", + " # add bus gate tag\n", + " busgates['filter_type'] = 'bus gate'\n", + "\n", + " return busgates, streets_gdf\n", + "\n", + " def get_contraflows(streets_gdf):\n", + " \"\"\"\n", + " Finds the unrestricted one-way streets for cycling but restricted for cars.\n", + "\n", + " Parameters:\n", + " streets_gdf (GeoDataFrame): A GeoDataFrame containing street data.\n", + "\n", + " Returns:\n", + " GeoDataFrame: A GeoDataFrame containing the unrestricted one-way streets for cycling.\n", + " \"\"\"\n", + "\n", + " # Find one-way streets where cycling is unrestricted but cars are restricted\n", + " oneways = streets_gdf[(streets_gdf[\"oneway\"] == True) & (streets_gdf[\"oneway:bicycle\"] == \"no\")]\n", + "\n", + " # Dissolve the roads with the same name to avoid miscounting the total number of oneways\n", + " oneways['name'] = oneways['name'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x))\n", + " oneways = oneways.dissolve(by='name')\n", + "\n", + " # Reset the index\n", + " oneways = oneways.reset_index()\n", + "\n", + " # Add one-way bike tag\n", + " 
oneways['filter_type'] = 'one-way bike'\n", + "\n", + " return oneways\n", + "\n", + "\n", + "\n", + " def filter_streets_continuations(input_gdf):\n", + " ## clean dataframe\n", + " # Check if 'highway_street' column exists and rename it to 'highway'\n", + " if 'highway_street' in input_gdf.columns:\n", + " input_gdf.rename(columns={'highway_street': 'highway'}, inplace=True)\n", + "\n", + "\n", + "\n", + "\n", + " # filter dataframe \n", + " ## remove indoor roads, these are likey pedestrian only however often don't have any \"cycling\" related tag\n", + " if 'covered' in input_gdf.columns:\n", + " input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'covered' in str(x))]\n", + " input_gdf = input_gdf[input_gdf['covered'] != 'yes']\n", + " ## also remove footways and steps, as these are almost pedestrain only, never cyclable\n", + " input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'footway' in str(x))]\n", + " input_gdf = input_gdf[~input_gdf['highway'].apply(lambda x: 'steps' in str(x))]\n", + "\n", + "\n", + "\n", + " ## clean dataframe\n", + " input_gdf['name'] = input_gdf['name'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x))\n", + " input_gdf['highway'] = input_gdf['highway'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x))\n", + "\n", + "\n", + "\n", + "\n", + " ## perform street continunation filtering\n", + " # Grouping by 'name' and checking for groups with 'pedestrian' and another highway type\n", + " grouped = input_gdf.groupby('name').filter(lambda x: any('pedestrian' in val for val in x['highway']) and len(x['highway'].unique()) > 1)\n", + " street_continuations_gdf = grouped[grouped['highway'].str.contains('pedestrian', case=False, na=False)] # Extracting the rows containing 'pedestrian' in the highway column\n", + "\n", + " ## deal with nan names\n", + "\n", + "\n", + " ## dissolve lines that are very very close to each other\n", + " if not street_continuations_gdf.empty:\n", + 
" street_continuations_gdf = street_continuations_gdf.to_crs('27700')\n", + " street_continuations_gdf['buffer'] = street_continuations_gdf.geometry.buffer(1)\n", + " dissolved = street_continuations_gdf.dissolve(by='name')\n", + " \n", + " # If a MultiPolygon is formed, convert it to individual polygons\n", + " if isinstance(dissolved.geometry.iloc[0], MultiPolygon):\n", + " dissolved = dissolved.explode()\n", + " \n", + " # Remove the buffer column\n", + " dissolved = dissolved.drop(columns='buffer')\n", + " street_continuations_gdf = dissolved.to_crs('4326')\n", + "\n", + " return street_continuations_gdf\n", + "\n", + "\n", + "\n", + " barriers, streets_gdf = get_barriers(boundary)\n", + " busgates, streets_gdf = get_bus_gates(streets_gdf)\n", + " oneways = get_contraflows(streets_gdf)\n", + " streets_continuations_gdf = filter_streets_continuations(streets_gdf)\n", + "\n", + " # add street conitinuation tag\n", + " streets_continuations_gdf['filter_type'] = 'street continuation'\n", + "\n", + "\n", + " ## ensure correct crs\n", + " barriers, busgates, oneways, streets_continuations_gdf = barriers.to_crs('4326'), busgates.to_crs('4326'), oneways.to_crs('4326'), streets_continuations_gdf.to_crs('4326')\n", + "\n", + " filters = gpd.GeoDataFrame(pd.concat([barriers, busgates, oneways, streets_continuations_gdf], ignore_index=True))\n", + "\n", + "\n", + "\n", + " ## alter neighbourhoods before joining\n", + " # Reset neighbourhood CRS\n", + " filters_results_gdf = neighbourhoods.to_crs('EPSG:27700')\n", + "\n", + " # Buffer to ensure all filters are captured\n", + " filters_results_gdf['geometry'] = filters_results_gdf['geometry'].buffer(5)\n", + "\n", + " # Reset neighbourhood CRS\n", + " filters_results_gdf = filters_results_gdf.to_crs('EPSG:4326')\n", + "\n", + " ## Spatial join\n", + " # Perform a spatial join between neighbourhoods and filters\n", + " joined_data = gpd.sjoin(filters_results_gdf, filters, how=\"left\", predicate=\"intersects\", 
lsuffix='_neigh', rsuffix='_filt')\n", + "\n", + " # Count the number of each filter within each neighbourhood\n", + " filter_type_counts = joined_data.groupby(['ID', 'filter_type']).size().unstack(fill_value=0)\n", + "\n", + " # Reset the index to make it more readable\n", + " filter_type_counts = filter_type_counts.reset_index()\n", + "\n", + " # Merge the filter_type_counts DataFrame with the neighbourhoods GeoDataFrame on the ID column\n", + " filters_results_gdf = filters_results_gdf.merge(filter_type_counts, on='ID', how='left')\n", + "\n", + " # Define the columns to sum\n", + " columns_to_sum = ['barrier or bollard', 'one-way bike', 'bus gate', 'street continuation']\n", + "\n", + " # Filter out columns that exist in the DataFrame\n", + " existing_columns = [col for col in columns_to_sum if col in filters_results_gdf.columns]\n", + "\n", + " # Sum the values in the existing columns per row\n", + " filters_results_gdf['total_filter_types'] = filters_results_gdf[existing_columns].sum(axis=1)\n", + "\n", + " # Fill NaN values with 0 if necessary\n", + " filters_results_gdf = filters_results_gdf.fillna(0)\n", + "\n", + " # Find locations where filters are found dense\n", + " # Convert road density to numeric if not already\n", + " filters_results_gdf['road_density'] = pd.to_numeric(filters_results_gdf['road_density'], errors='coerce')\n", + "\n", + " # Create new column to hold filters * density value\n", + " filters_results_gdf['filter_road_density'] = filters_results_gdf['total_filter_types'] * filters_results_gdf['road_density']\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " #### rat runs\n", + "\n", + "\n", + " drive_g = ox.graph_from_place(place, network_type='drive', simplify=True)\n", + "\n", + " ## Clean graph and calculate travel times along edges\n", + "\n", + " # Function to clean 'maxspeed' values\n", + " def clean_maxspeed(maxspeed):\n", + " if maxspeed is None:\n", + " return 30 # Replace None with a default value of 30\n", + " elif 
isinstance(maxspeed, str) and ' mph' in maxspeed:\n", + " return float(maxspeed.replace(' mph', ''))\n", + " elif isinstance(maxspeed, list): # Handle cases where 'maxspeed' is a list\n", + " return [float(speed.replace(' mph', '')) for speed in maxspeed]\n", + " else:\n", + " return maxspeed\n", + "\n", + " # Apply the function to 'maxspeed' in each edge attribute\n", + " for u, v, key, data in drive_g.edges(keys=True, data=True):\n", + " if 'maxspeed' in data:\n", + " data['maxspeed'] = clean_maxspeed(data['maxspeed'])\n", + " else:\n", + " data['maxspeed'] = 30 # Assign default value of 30 if 'maxspeed' is missing\n", + "\n", + " # Function to convert 'maxspeed' to a numeric value\n", + " def convert_maxspeed(maxspeed):\n", + " if isinstance(maxspeed, list) and maxspeed: # Check if 'maxspeed' is a non-empty list\n", + " # If 'maxspeed' is a list, convert the first value to a numeric value\n", + " return convert_single_maxspeed(maxspeed[0])\n", + " else:\n", + " # If 'maxspeed' is not a list or an empty list, convert the single value to a numeric value\n", + " return convert_single_maxspeed(maxspeed)\n", + "\n", + " # Helper function to convert a single maxspeed value to a numeric value\n", + " def convert_single_maxspeed(maxspeed):\n", + " if maxspeed is None:\n", + " return 30 # Replace None with a default value of 30\n", + "\n", + " if isinstance(maxspeed, str):\n", + " # Extract numeric part of the string using regular expression\n", + " numeric_part = ''.join(c for c in maxspeed if c.isdigit() or c == '.')\n", + " return float(numeric_part) if numeric_part else 30 # Default value if no numeric part found\n", + " elif isinstance(maxspeed, (int, float)):\n", + " return maxspeed\n", + " else:\n", + " return 30 # Default value if the type is unknown\n", + "\n", + " # Function to calculate travel time\n", + " def calculate_travel_time(length, maxspeed):\n", + " # Convert 'maxspeed' to a numeric value\n", + " maxspeed_value = convert_maxspeed(maxspeed)\n", + "\n", 
+ " # Convert maxspeed to meters per second\n", + " speed_mps = maxspeed_value * 0.44704 # 1 mph = 0.44704 m/s\n", + "\n", + " # Calculate travel time in seconds using the formula: time = distance/speed\n", + " if length is not None and speed_mps > 0:\n", + " travel_time = length / speed_mps\n", + " return travel_time\n", + " else:\n", + " return None\n", + "\n", + " # Apply the function to 'length' and 'maxspeed' in each edge attribute\n", + " for u, v, key, data in drive_g.edges(keys=True, data=True):\n", + " if 'length' in data:\n", + " data['travel_time'] = calculate_travel_time(data.get('length'), data.get('maxspeed'))\n", + "\n", + "\n", + "\n", + " def get_sparse_graph(drive_g):\n", + " \"\"\"\n", + " Create a sparse graph from bounding roads.\n", + "\n", + " Args:\n", + " drive_g (networkx.Graph): The original graph.\n", + "\n", + " Returns:\n", + " networkx.Graph: The sparse graph.\n", + " \"\"\"\n", + " # Create a copy of the original graph\n", + " sparse_drive_g = drive_g.copy()\n", + "\n", + " # Define the conditions for keeping edges\n", + " conditions = [\n", + " (\n", + " data.get('highway') in ['trunk', 'trunk_link', 'motorway', 'motorway_link', 'primary', 'primary_link',\n", + " 'secondary', 'secondary_link', 'tertiary', 'tertiary_link']\n", + " ) or (\n", + " data.get('maxspeed') in ['60', '70', '40', ('20', '50'), ('30', '60'), ('30', '50'), ('70', '50'),\n", + " ('40', '60'), ('70', '60'), ('60', '40'), ('50', '40'), ('30', '40'),\n", + " ('20', '60'), ('70 ', '40 '), ('30 ', '70')]\n", + " )\n", + " for u, v, k, data in sparse_drive_g.edges(keys=True, data=True)\n", + " ]\n", + "\n", + " # Keep only the edges that satisfy the conditions\n", + " edges_to_remove = [\n", + " (u, v, k) for (u, v, k), condition in zip(sparse_drive_g.edges(keys=True), conditions) if not condition\n", + " ]\n", + " sparse_drive_g.remove_edges_from(edges_to_remove)\n", + "\n", + " # Clean nodes by removing isolated nodes from the graph\n", + " isolated_nodes = 
list(nx.isolates(sparse_drive_g))\n", + " sparse_drive_g.remove_nodes_from(isolated_nodes)\n", + "\n", + " return sparse_drive_g\n", + "\n", + "\n", + "\n", + " sparse_drive_g = get_sparse_graph(drive_g)\n", + "\n", + "\n", + " #print(\"Number of edges in the sparse graph:\", sparse_drive_g.number_of_edges())\n", + "\n", + "\n", + "\n", + " ## create a partitioned network (using the full graph and the sparse graph)\n", + "\n", + " # Make a copy of the original graph\n", + " drive_g_copy = drive_g.copy()\n", + "\n", + " ## Remove edges \n", + " drive_g_copy.remove_edges_from(sparse_drive_g.edges)\n", + "\n", + " ## Remove nodes\n", + " # Convert nodes to strings\n", + " sparse_drive_nodes_str = [str(node) for node in sparse_drive_g.nodes]\n", + " drive_g_copy.remove_nodes_from(sparse_drive_nodes_str)\n", + "\n", + " # clean nodes by removing isolated nodes from the graph\n", + " isolated_nodes = list(nx.isolates(drive_g_copy))\n", + " drive_g_copy.remove_nodes_from(isolated_nodes)\n", + "\n", + " len(drive_g_copy)\n", + "\n", + "\n", + "\n", + "\n", + " ## partition the full graph, by removing the sparse graph from it.\n", + "\n", + " # first nodes shared between sparse_drive_g and drive_g (these nodes are the connection between neighbourhoods and boundary roads)\n", + " shared_nodes = set(sparse_drive_g.nodes).intersection(drive_g_copy.nodes)\n", + "\n", + "\n", + " # we then need to remove nodes where junctions between two neighbourhood nodes and sparse graphs are present. 
\n", + " # we do this by adding new nodes the end of edges which intersect with the sparse graph, to split these junctions up\n", + " # Initialize a counter to generate unique indices for new nodes\n", + " node_counter = Counter()\n", + " # Iterate through shared nodes\n", + " for shared_node in shared_nodes:\n", + " # Find edges in drive_g connected to the shared node\n", + " drive_g_edges = list(drive_g_copy.edges(shared_node, data=True, keys=True))\n", + "\n", + " # Find edges in sparse_drive_g connected to the shared node\n", + " sparse_drive_g_edges = list(sparse_drive_g.edges(shared_node, data=True, keys=True))\n", + "\n", + " # Iterate through edges in drive_g connected to the shared node\n", + " for u, v, key, data in drive_g_edges:\n", + " # Check if the corresponding edge is not in sparse_drive_g\n", + " if (u, v, key) not in sparse_drive_g_edges:\n", + " # Create new end nodes for the edge in drive_g\n", + " new_u = f\"new_{u}\" if u == shared_node else u\n", + " new_v = f\"new_{v}\" if v == shared_node else v\n", + "\n", + " # Generate a unique index for each new node ID\n", + " new_u_id = f\"{new_u}_{key}_{node_counter[new_u]}\" if new_u != u else new_u\n", + " new_v_id = f\"{new_v}_{key}_{node_counter[new_v]}\" if new_v != v else new_v\n", + "\n", + " # Increment the counter for each new node\n", + " node_counter[new_u] += 1\n", + " node_counter[new_v] += 1\n", + "\n", + " # Add new nodes and update the edge\n", + " drive_g_copy.add_node(new_u_id, **drive_g_copy.nodes[u])\n", + " drive_g_copy.add_node(new_v_id, **drive_g_copy.nodes[v])\n", + "\n", + " drive_g_copy.add_edge(new_u_id, new_v_id, key=key, **data)\n", + "\n", + " # Check if the reverse edge already exists in drive_g_copy\n", + " if not drive_g_copy.has_edge(new_v_id, new_u_id, key):\n", + " # Create the reverse edge with new nodes\n", + " drive_g_copy.add_edge(new_v_id, new_u_id, key=key, **data)\n", + "\n", + " # Disconnect the shared node from the new edge\n", + " 
drive_g_copy.remove_edge(u, v, key)\n", + "\n", + " # Remove the shared node\n", + " drive_g_copy.remove_node(shared_node)\n", + "\n", + "\n", + "\n", + " # Find strongly connected components in the modified drive_g graph\n", + " drive_g_scc = list(nx.strongly_connected_components(drive_g_copy))\n", + "\n", + " # Create a color mapping for edges in each strongly connected component using random colors\n", + " edge_colors = {}\n", + " for i, component in enumerate(drive_g_scc):\n", + " color = (random.random(), random.random(), random.random()) # RGB tuple with random values\n", + " for edge in drive_g_copy.edges:\n", + " if edge[0] in component and edge[1] in component:\n", + " edge_colors[edge] = color\n", + "\n", + " # Plot the graph with edge colors and without nodes\n", + " #fig, ax = ox.plot_graph(drive_g_copy, edge_color=[edge_colors.get(edge, (0, 0, 0)) for edge in drive_g_copy.edges], node_size=0, show=False, close=False, figsize=(20, 20))\n", + " #ox.plot_graph(sparse_drive_g, ax=ax, edge_color='red', edge_linewidth=2, node_size=0, show=True)\n", + " #fig.show()\n", + "\n", + "\n", + " ## add ssc index to each neighbourhood\n", + "\n", + " # Create a mapping from nodes to their SCC index\n", + " node_scc_mapping = {node: i for i, scc in enumerate(drive_g_scc) for node in scc}\n", + "\n", + " # Add SCC attribute to edges\n", + " for u, v, key, data in drive_g_copy.edges(keys=True, data=True):\n", + " scc_index_u = node_scc_mapping.get(u, None)\n", + " scc_index_v = node_scc_mapping.get(v, None)\n", + " \n", + " # Add the SCC index as an attribute to the edge\n", + " drive_g_copy[u][v][key]['scc_index'] = scc_index_u if scc_index_u is not None else scc_index_v\n", + "\n", + "\n", + " ## join neighbourhood mapping to orignial driving graph\n", + "\n", + " # Add SCC index attribute to drive_g\n", + " for u, v, key, data in drive_g.edges(keys=True, data=True):\n", + " scc_index_u = node_scc_mapping.get(u, None)\n", + " scc_index_v = node_scc_mapping.get(v, 
None)\n", + " \n", + " # Add the SCC index as an attribute to the edge\n", + " drive_g[u][v][key]['scc_index'] = scc_index_u if scc_index_u is not None else scc_index_v\n", + "\n", + "\n", + "\n", + " ## get random nodes\n", + "\n", + "\n", + "\n", + "\n", + " # Function to get random nodes present in both graphs for each node\n", + " def get_random_nodes_for_each(graph1, graph2):\n", + " random_nodes_for_each = {}\n", + " common_nodes = set(graph1.nodes()) & set(graph2.nodes())\n", + " total_common_nodes = len(common_nodes)\n", + " num_nodes = min(iterations, max(1, int(total_common_nodes * 0.9))) # 10% less than the total number of common nodes, capped at the input max iterations\n", + "\n", + " for node in common_nodes:\n", + " neighbors = list(set(graph1.neighbors(node)) & set(graph2.neighbors(node)))\n", + " if len(neighbors) >= num_nodes:\n", + " random_neighbors = random.sample(neighbors, num_nodes)\n", + " else:\n", + " random_neighbors = neighbors + random.sample(list(common_nodes - set(neighbors)), num_nodes - len(neighbors))\n", + " random_nodes_for_each[node] = random_neighbors\n", + " return random_nodes_for_each\n", + "\n", + "\n", + "\n", + " # Get random nodes for each common node\n", + " random_nodes_for_each = get_random_nodes_for_each(drive_g, sparse_drive_g)\n", + "\n", + "\n", + "\n", + " # Print random nodes for each common node\n", + " #for node, random_neighbors in random_nodes_for_each.items():\n", + " #print(f\"Random nodes for node {node}: {random_neighbors}\")\n", + "\n", + "\n", + " ## Find shortest paths \n", + "\n", + " # Convert the dictionary of nodes into a list of tuples\n", + " nodes_list = [(key, value) for key, values in random_nodes_for_each.items() for value in values]\n", + "\n", + " # Find shortest paths and store them in a dictionary\n", + " shortest_paths = {}\n", + " for start_node, end_node in nodes_list:\n", + " try:\n", + " shortest_path = nx.shortest_path(drive_g, start_node, end_node, weight='travel_time')\n", + " 
shortest_paths[(start_node, end_node)] = shortest_path\n", + " except nx.NetworkXNoPath:\n", + " print(f\"No path found between {start_node} and {end_node}. Skipping...\")\n", + "\n", + " # Print the shortest paths\n", + " #for key, value in shortest_paths.items():\n", + " #print(f\"Shortest path from {key[0]} to {key[1]}: {value}\")\n", + "\n", + "\n", + "\n", + "\n", + " ## find edges passed through\n", + "\n", + " edges_passed_through = set()\n", + "\n", + " for path in shortest_paths.values():\n", + " # Pair consecutive nodes to create edges\n", + " path_edges = [(path[i], path[i+1]) for i in range(len(path)-1)]\n", + " \n", + " # Check if each edge exists in the graph\n", + " for edge in path_edges:\n", + " if edge in drive_g.edges:\n", + " edges_passed_through.add(edge)\n", + "\n", + " # Convert the set of edges to a list if needed\n", + " edges_passed_through = list(edges_passed_through)\n", + "\n", + " for u, v, data in drive_g.edges(data=True):\n", + " if (u, v) in edges_passed_through or (v, u) in edges_passed_through:\n", + " data['rat_run'] = True\n", + " else:\n", + " data['rat_run'] = False\n", + "\n", + "\n", + " # Convert the NetworkX graph to a GeoDataFrame\n", + " drive_gdf_nodes, drive_gdf_edges = ox.graph_to_gdfs(drive_g)\n", + "\n", + " drive_gdf_edges = drive_gdf_edges.to_crs(27700)\n", + " drive_gdf_nodes = drive_gdf_nodes.to_crs(27700)\n", + "\n", + "\n", + " # Filter drive_gdf_edges to only include edges with 'rat_run' = True\n", + " rat_run_edges = drive_gdf_edges[drive_gdf_edges['rat_run'] == True]\n", + "\n", + " # reset crs\n", + " neighbourhoods = neighbourhoods.to_crs(27700)\n", + "\n", + " # Perform spatial join between neighbourhoods and rat_run_edges\n", + " join_result = gpd.sjoin(neighbourhoods, rat_run_edges, how='left', op='intersects')\n", + "\n", + " # Group by neighbourhood index and count the number of rat_run edges in each\n", + " rat_run_edge_count = 
join_result.groupby(join_result.index)['ID'].count().reset_index(name='rat_run_edge_count')\n", + "\n", + " # Group by neighbourhood index and count the number of rat_run edges in each\n", + " rat_run_edge_count = join_result.groupby(join_result.index)['ID'].count().reset_index(name='rat_run_edge_count')\n", + "\n", + " # reset crs\n", + " neighbourhoods = neighbourhoods.to_crs(27700)\n", + "\n", + " # Join rat_run_edge_count with neighbourhoods based on index\n", + " neighbourhoods = neighbourhoods.join(rat_run_edge_count.set_index('index'))\n", + "\n", + " ## now we should have filters_results_gdf and access_results_gdf, and neighbourhoods with rat run counts joined\n", + "\n", + " filters_results_gdf, access_results_gdf, neighbourhoods\n", + "\n", + " ## join all together\n", + "\n", + " results_gdf = gpd.GeoDataFrame(filters_results_gdf.merge(access_results_gdf, on=\"ID\", suffixes=('_filters', \"_access\")))\n", + " results_gdf = results_gdf.set_geometry('geometry_access')\n", + " final_results_gdf = results_gdf.merge(neighbourhoods[['ID', 'rat_run_edge_count']], on='ID', how='left')\n", + " final_results_gdf['geometry'] = final_results_gdf['geometry_filters']\n", + " final_results_gdf = final_results_gdf.set_geometry('geometry')\n", + " final_results_gdf.drop(columns=['geometry_filters', 'geometry_access'], inplace=True)\n", + "\n", + "\n", + "\n", + " # Define the scoring function for \"rat_run_edge_count\"\n", + " def score_rat_run_edge_count(value):\n", + " if value <= 1:\n", + " return 100\n", + " else:\n", + " return 100 / (2 ** value) # Exponetial scoring\n", + "\n", + " # Apply the scoring function to the \"rat_run_edge_count\" column\n", + " final_results_gdf[\"rat_run_score\"] = final_results_gdf[\"rat_run_edge_count\"].apply(score_rat_run_edge_count)\n", + "\n", + " import math\n", + "\n", + " def score_mean_distance_diff(value):\n", + " if value >= 0:\n", + " return 0\n", + " elif value <= -750: # set a 750m cut off\n", + " return 100\n", + " 
else:\n", + " normalized_value = abs(value) / 750 # Normalize the value between 0 and 1\n", + " score = 100 * (1 - math.exp(-5 * normalized_value)) # Exponential increase\n", + " return score\n", + "\n", + " # Apply the modified scoring function to the \"mean_distance_diff\" column\n", + " final_results_gdf[\"mean_distance_diff_score\"] = final_results_gdf[\"mean_distance_diff\"].apply(score_mean_distance_diff)\n", + "\n", + " def score_road_density_filters(value):\n", + " if value <= 0:\n", + " return 0\n", + " elif value >= 40:\n", + " return 100\n", + " else:\n", + " return (value / 40) * 100\n", + "\n", + " # Apply the scoring function to the \"road_density_filters\" column\n", + " final_results_gdf[\"filter_road_density_score\"] = final_results_gdf[\"filter_road_density\"].apply(score_road_density_filters)\n", + "\n", + " # Create the \"scored_neighbourhoods\" geodataframe with the necessary columns\n", + " scored_neighbourhoods = final_results_gdf[[\"geometry\", \"rat_run_score\", \"mean_distance_diff_score\", \"filter_road_density_score\"]]\n", + "\n", + " # Calculate overall score\n", + " scored_neighbourhoods[\"overall_score\"] = (scored_neighbourhoods[\"rat_run_score\"] + scored_neighbourhoods[\"mean_distance_diff_score\"] + scored_neighbourhoods[\"filter_road_density_score\"]) / 3\n", + "\n", + " # Define weights for each score\n", + " weight_rat_run_score = 1\n", + " weight_mean_distance_diff_score = 0.25\n", + " weight_road_density_filters_score = 0.75\n", + "\n", + " # Calculate overall score with weights\n", + " scored_neighbourhoods[\"overall_score\"] = (\n", + " (weight_rat_run_score * scored_neighbourhoods[\"rat_run_score\"]) +\n", + " (weight_mean_distance_diff_score * scored_neighbourhoods[\"mean_distance_diff_score\"]) +\n", + " (weight_road_density_filters_score * scored_neighbourhoods[\"filter_road_density_score\"])\n", + " ) / (weight_rat_run_score + weight_mean_distance_diff_score + weight_road_density_filters_score)\n", + "\n", + "\n", + 
"\n", + " ## find elbow point for k-means clustering\n", + "\n", + " # Selecting the features for clustering\n", + " X = scored_neighbourhoods[[\"rat_run_score\", \"mean_distance_diff_score\", \"filter_road_density_score\"]]\n", + "\n", + " # Initialize a list to store the within-cluster sum of squares (WCSS) for different values of K\n", + " wcss = []\n", + "\n", + " # Define the range of K values to try\n", + " k_values = range(1, 11)\n", + "\n", + " # Calculate WCSS for each value of K\n", + " for k in k_values:\n", + " kmeans = KMeans(n_clusters=k, random_state=42)\n", + " kmeans.fit(X)\n", + " wcss.append(kmeans.inertia_)\n", + "\n", + " # Plotting the elbow curve\n", + " plt.plot(k_values, wcss, marker='o')\n", + " plt.title('Elbow Method')\n", + " plt.xlabel('Number of Clusters (K)')\n", + " plt.ylabel('WCSS')\n", + " plt.xticks(k_values)\n", + " plt.show()\n", + "\n", + " ## Run k-means clustering\n", + " # Define the number of clusters\n", + " k = 2\n", + "\n", + " # Select the features for clustering\n", + " features = [\"rat_run_score\", \"mean_distance_diff_score\", \"filter_road_density_score\"]\n", + "\n", + " # Extract the features from the dataframe\n", + " X = scored_neighbourhoods[features]\n", + "\n", + " # Initialize the KMeans model\n", + " kmeans = KMeans(n_clusters=k, random_state=42)\n", + "\n", + " # Fit the model to the data\n", + " kmeans.fit(X)\n", + "\n", + " # Get the cluster labels\n", + " cluster_labels = kmeans.labels_\n", + "\n", + " # Add the cluster labels to the dataframe\n", + " scored_neighbourhoods[\"cluster_label\"] = cluster_labels\n", + "\n", + "\n", + "\n", + "\n", + "\n", + " ## maps\n", + " ## adjust geodataframe contents for plotting purposes\n", + "\n", + " replacement_map = {\n", + " 'barrier or bollard': 'Barrier or Bollard',\n", + " 'bus gate': 'Bus Gate',\n", + " 'one-way bike': 'Cycle Contraflow',\n", + " 'street continuation': 'Street Continuation'\n", + " }\n", + "\n", + " # Replace filter types in the 
DataFrame\n", + " filters['filter_type'] = filters['filter_type'].map(replacement_map).fillna(filters['filter_type'])\n", + "\n", + "\n", + " import folium\n", + " import branca.colormap as cm\n", + " from folium.plugins import MarkerCluster\n", + "\n", + "\n", + " # Assuming you have already loaded your GeoDataFrames: scored_neighbourhoods, filters_results_gdf, and streets_gdf\n", + "\n", + " # Calculate the centroid of the scored_neighbourhoods GeoDataFrame\n", + " centroid = scored_neighbourhoods.geometry.centroid.iloc[0]\n", + " center_latitude, center_longitude = centroid.y, centroid.x\n", + "\n", + " # Create a Folium map centered around the centroid of scored_neighbourhoods\n", + " m = folium.Map(location=[center_latitude, center_longitude], zoom_start=12)\n", + "\n", + " # Define the colormap using cm.linear.viridis\n", + " cmap = cm.linear.viridis.scale(scored_neighbourhoods['overall_score'].min(), scored_neighbourhoods['overall_score'].max())\n", + "\n", + " # Plot scored_neighbourhoods using the Viridis colormap\n", + " folium.GeoJson(scored_neighbourhoods,\n", + " name= \"Scored Neighbourhoods\",\n", + " style_function=lambda x: {'fillColor': cmap(x['properties']['overall_score']),\n", + " 'color': cmap(x['properties']['overall_score']),\n", + " 'weight': 1, 'fillOpacity': 0.7},\n", + " tooltip=folium.features.GeoJsonTooltip(\n", + " fields=['rat_run_score', 'mean_distance_diff_score', 'filter_road_density_score', 'overall_score', 'cluster_label'],\n", + " aliases=['Rat Run Score', 'Mean Distance Diff Score', 'Filter Road Density Score', 'Overall Score', 'Cluster Label'])\n", + " ).add_to(m)\n", + "\n", + " # Plot streets_gdf on the map with default blue color and slightly transparent\n", + " streets_layer = folium.GeoJson(drive_gdf_edges,\n", + " name=\"Streets\",\n", + " style_function=lambda x: {'color': 'lightgreen', 'weight': 1, 'fillOpacity': 0.7}\n", + " ).add_to(m)\n", + "\n", + " # Plot rat_run_edges on the map with red color\n", + " 
rat_runs_layer = folium.GeoJson(rat_run_edges,\n", + " name=\"Rat Runs\",\n", + " style_function=lambda x: {'color': 'red', 'weight': 1.5, 'fillOpacity': 0.7}\n", + " ).add_to(m)\n", + "\n", + " # Plot boundary_roads on the map with orange color and thicker weight\n", + " boundary_roads_layer = folium.GeoJson(boundary_roads,\n", + " name=\"Busy Roads\",\n", + " style_function=lambda x: {'color': 'orange', 'weight': 3, 'fillOpacity': 0.7}\n", + " ).add_to(m)\n", + "\n", + "\n", + "\n", + " # Create a feature group for each type of layer\n", + " point_group = folium.FeatureGroup(name='Modal Filtering Points', show=True)\n", + " line_group = folium.FeatureGroup(name='Modal Filtering Streets', show=True)\n", + "\n", + "\n", + "\n", + " # Plot purple point markers for filters with tooltips\n", + " for _, row in filters.iterrows():\n", + " if row.geometry.type == 'Point':\n", + " tooltip_text = f\"Filter type: {row['filter_type']}\" # Concatenating \"Filter type:\" with the 'filter_type' value\n", + " folium.CircleMarker(location=[row.geometry.y, row.geometry.x], radius=2, color='purple', fill=True, fill_color='purple', tooltip=tooltip_text).add_to(point_group)\n", + " elif row.geometry.type == 'MultiLineString' or row.geometry.type == 'LineString':\n", + " tooltip_text = f\"Filter type: {row['filter_type']}\" # Concatenating \"Filter type:\" with the 'filter_type' value\n", + " folium.GeoJson(row.geometry, style_function=lambda x: {'color': 'purple', 'weight': 1.5, 'fillOpacity': 0.7}, tooltip=tooltip_text).add_to(line_group)\n", + "\n", + "\n", + " # Add layer groups to the map\n", + " point_group.add_to(m)\n", + " line_group.add_to(m)\n", + "\n", + " # Add layer control\n", + " folium.LayerControl(autoZIndex=True).add_to(m)\n", + "\n", + " cmap.caption = 'LTN Plausiblity Scores (Possible range: 0-100)'\n", + " cmap.add_to(m)\n", + "\n", + "\n", + " # add text\n", + " from folium import IFrame\n", + "\n", + " # Define the HTML content for the text\n", + " html_text = 
\"\"\"\n", + "
\n", + "

Scored neighbourhoods show a LTN 'Plausibility' score which incorporates metrics based on the presence of rat-runs, modal filters and measures of neighbourhood permeability. Map results are experimental, and should be treated as such. Get in touch via c.larkin@newcastle.ac.uk or https://github.com/Froguin99/LTN-Detection.

\n", + "
\n", + " \"\"\"\n", + "\n", + " # Add the HTML content to the map\n", + " folium.MacroElement().add_to(m)\n", + " m.get_root().html.add_child(folium.Element(html_text))\n", + "\n", + "\n", + " # save to geopackage\n", + "\n", + " # Extract place name without \", United Kingdom\"\n", + " place_name = place.replace(\", United Kingdom\", \"\").strip()\n", + "\n", + " # Create the file paths\n", + " map_file_path = os.path.join(r'C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\Examples\\maps', f'{place_name}_example.html')\n", + " geopackage_file_path = os.path.join(r'C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\data\\scored_neighbourhoods', f'scored_neighbourhoods_{place_name}.gpkg')\n", + "\n", + " # Export map\n", + " m.save(map_file_path)\n", + "\n", + " # Send to geopackage \n", + " geometry_column = scored_neighbourhoods.geometry.name\n", + "\n", + " # Iterate through the columns and convert them to strings\n", + " for column in scored_neighbourhoods.columns:\n", + " if column != geometry_column:\n", + " scored_neighbourhoods[column] = scored_neighbourhoods[column].astype(str)\n", + "\n", + " scored_neighbourhoods.to_file(geopackage_file_path, driver=\"GPKG\")\n", + "\n", + "\n", + " ## export rat runs \n", + " geopackage_file_path = os.path.join(r'C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\data\\rat_runs', f'rat_runs_{place_name}.gpkg')\n", + "\n", + "\n", + " # Send to geopackage \n", + " geometry_column = rat_run_edges.geometry.name\n", + "\n", + " # Iterate through the columns and convert them to strings\n", + " for column in rat_run_edges.columns:\n", + " if column != geometry_column:\n", + " rat_run_edges[column] = rat_run_edges[column].astype(str)\n", + "\n", + " rat_run_edges.to_file(geopackage_file_path, driver=\"GPKG\")\n", + "\n", + "\n", + "\n", + " ## export modal filters\n", + " 
geopackage_file_path = os.path.join(r'C:\\Users\\b8008458\\OneDrive - Newcastle University\\2022 to 2023\\PhD\\ltnDetection\\LTN-Detection\\data\\filters', f'filters_{place_name}.gpkg')\n", + "\n", + "\n", + " # Send to geopackage \n", + " geometry_column = filters.geometry.name\n", + "\n", + " # Iterate through the columns and convert them to strings\n", + " for column in filters.columns:\n", + " if column != geometry_column:\n", + " filters[column] = filters[column].astype(str)\n", + "\n", + " filters.to_file(geopackage_file_path, driver=\"GPKG\")\n", + "\n", + "\n", + "\n", + " # Display the map\n", + " #m\n", + "\n", + " print(\"Finished\", place)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}