diff --git a/api/failing.json b/api/failing.json index 53be860fa3..6b15feac30 100644 --- a/api/failing.json +++ b/api/failing.json @@ -1 +1 @@ -[{"council_id":"AGB","missing":false,"latest_run":{"status_code":1,"log_text":"[12:34:24] Fetching Scraper for: AGB handlers.py:23\n Begin attempting to scrape: AGB handlers.py:27\n Deleting existing data... base.py:239\n[12:34:25] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:34:26] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.argyll-bute.gov.uk/councillor_list \n[12:34:29] list index out of range handlers.py:36\n Finished attempting to scrape: AGB base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-05 12:34:24.456967","end":"2023-10-05 12:34:29.298603","duration":4}},{"council_id":"BRT","missing":false,"latest_run":{"status_code":1,"log_text":"[14:17:31] Fetching Scraper for: BRT handlers.py:23\n Begin attempting to scrape: BRT handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n Getting all files in Councillors/json... base.py:191\n[14:17:32] ...found 44 files in Councillors/json base.py:207\n Getting all files in Councillors/raw... base.py:191\n ...found 44 files in Councillors/raw base.py:207\n ...found 89 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 89 files base.py:216\n ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://democracy.broxtowe.gov.uk/mgWebService.asmx/GetCoun \n cillorsByWard \n[14:19:42] HTTPConnectionPool(host='democracy.broxtowe.gov.uk', handlers.py:36\n port=80): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno 110] Connection timed out')) \n[14:19:43] Finished attempting to scrape: BRT base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/opt/python/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 415, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/opt/python/urllib3/connection.py\", line 244, in request\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1256, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1302, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1251, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1011, in _send_output\n self.send(msg)\n File \"/var/lang/lib/python3.8/http/client.py\", line 951, in send\n self.connect()\n File \"/opt/python/urllib3/connection.py\", line 205, in connect\n conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='democracy.broxtowe.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, 
verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='democracy.broxtowe.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n","start":"2023-10-05 14:17:31.036425","end":"2023-10-05 14:19:43.179800","duration":132}},{"council_id":"BRX","missing":false,"latest_run":{"status_code":1,"log_text":"[14:23:26] Fetching Scraper for: BRX handlers.py:23\n Begin attempting to scrape: BRX handlers.py:27\n[14:23:27] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:23:28] ...data deleted. base.py:246\n Scraping from https://www.broxbourne.gov.uk/councillors base.py:42\n[14:23:29] list index out of range handlers.py:36\n Finished attempting to scrape: BRX base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-05 14:23:26.809491","end":"2023-10-05 14:23:29.530424","duration":2}},{"council_id":"BRY","missing":false,"latest_run":{"status_code":1,"log_text":"[14:07:07] Fetching Scraper for: BRY handlers.py:23\n Begin attempting to scrape: BRY handlers.py:27\n Deleting existing data... base.py:239\n[14:07:08] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:07:09] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://cds.bromley.gov.uk/mgWebService.asmx/GetCouncillors \n ByWard \n[14:09:20] HTTPConnectionPool(host='cds.bromley.gov.uk', handlers.py:36\n port=80): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno 110] Connection timed out')) \n Finished attempting to scrape: BRY base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/opt/python/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 415, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/opt/python/urllib3/connection.py\", line 244, in request\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1256, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1302, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1251, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1011, in _send_output\n self.send(msg)\n File \"/var/lang/lib/python3.8/http/client.py\", line 951, in send\n self.connect()\n File \"/opt/python/urllib3/connection.py\", line 205, in connect\n conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='cds.bromley.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File 
\"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='cds.bromley.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n","start":"2023-10-05 14:07:07.527065","end":"2023-10-05 14:09:20.678545","duration":133}},{"council_id":"CAY","missing":false,"latest_run":{"status_code":1,"log_text":"[12:19:57] Fetching Scraper for: CAY handlers.py:23\n Begin attempting to scrape: CAY handlers.py:27\n[12:19:58] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:19:59] ...data deleted. base.py:246\n Scraping from base.py:42\n http://www.democracy.caerphilly.gov.uk/mgWebService.asmx/G \n etCouncillorsByWard \n HTTPSConnectionPool(host='www.democracy.caerphilly.gov handlers.py:36\n .uk', port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(CertificateError(\"hostname \n 'www.democracy.caerphilly.gov.uk' doesn't match either \n of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\"))) \n Finished attempting to scrape: CAY base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 472, in connect\n _match_hostname(cert, self.assert_hostname or server_hostname)\n File \"/opt/python/urllib3/connection.py\", line 545, in _match_hostname\n match_hostname(cert, asserted_hostname)\n File \"/opt/python/urllib3/util/ssl_match_hostname.py\", line 150, in match_hostname\n raise CertificateError(\nurllib3.util.ssl_match_hostname.CertificateError: hostname 'www.democracy.caerphilly.gov.uk' doesn't match either of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='www.democracy.caerphilly.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(CertificateError(\"hostname 'www.democracy.caerphilly.gov.uk' doesn't match either of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\")))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n 
scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in <listcomp>\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n resp = self.send(\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='www.democracy.caerphilly.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(CertificateError(\"hostname 'www.democracy.caerphilly.gov.uk' doesn't match either of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\")))\n","start":"2023-10-05 12:19:57.744370","end":"2023-10-05 12:19:59.861592","duration":2}},{"council_id":"CMD","missing":false,"latest_run":{"status_code":1,"log_text":"[13:03:10] Fetching Scraper for: CMD handlers.py:23\n Begin attempting to scrape: CMD handlers.py:27\n Deleting existing data... base.py:239\n[13:03:11] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:03:12] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://democracy.camden.gov.uk/mgWebService.asmx/GetCounci \n llorsByWard \n HTTPSConnectionPool(host='democracy.camden.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: CMD base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democracy.camden.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in <listcomp>\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n resp = self.send(\n File 
\"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democracy.camden.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-05 13:03:10.423616","end":"2023-10-05 13:03:12.695249","duration":2}},{"council_id":"COT","missing":false,"latest_run":{"status_code":1,"log_text":"[14:20:11] Fetching Scraper for: COT handlers.py:23\n Begin attempting to scrape: COT handlers.py:27\n Deleting existing data... base.py:239\n[14:20:12] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:20:13] ...data deleted. base.py:246\n Scraping from base.py:42\n http://www.cmis.cotswold.gov.uk/cmis5/People/tabid/62/Scre \n enMode/Alphabetical/Default.aspx \n HTTPConnectionPool(host='www.cmis.cotswold.gov.uk', handlers.py:36\n port=80): Max retries exceeded with url: \n /cmis5/People/tabid/62/ScreenMode/Alphabetical/Default \n .aspx (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno -2] Name or service not known')) \n Finished attempting to scrape: COT base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 72, in create_connection\n for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):\n File \"/var/lang/lib/python3.8/socket.py\", line 918, in getaddrinfo\n for res in _socket.getaddrinfo(host, port, family, type, proto, flags):\nsocket.gaierror: [Errno -2] Name or service not known\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 415, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/opt/python/urllib3/connection.py\", line 244, in request\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1256, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1302, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1251, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1011, in _send_output\n self.send(msg)\n File \"/var/lang/lib/python3.8/http/client.py\", line 951, in send\n self.connect()\n File \"/opt/python/urllib3/connection.py\", line 205, in connect\n conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno -2] Name or service not known\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File 
\"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='www.cmis.cotswold.gov.uk', port=80): Max retries exceeded with url: /cmis5/People/tabid/62/ScreenMode/Alphabetical/Default.aspx (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -2] Name or service not known'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='www.cmis.cotswold.gov.uk', port=80): Max retries exceeded with url: /cmis5/People/tabid/62/ScreenMode/Alphabetical/Default.aspx (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -2] Name or service not known'))\n","start":"2023-10-05 14:20:11.341682","end":"2023-10-05 14:20:13.621236","duration":2}},{"council_id":"CWY","missing":false,"latest_run":{"status_code":1,"log_text":"[12:12:01] Fetching Scraper for: CWY handlers.py:23\n Begin attempting to scrape: CWY handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[12:12:02] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:12:03] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://modgoveng.conwy.gov.uk/mgWebService.asmx/GetCouncil \n lorsByWard \n 404 Client Error: Not Found for url: handlers.py:36\n http://modgoveng.conwy.gov.uk/mgWebService.asmx/GetCou \n ncillorsByWard \n Finished attempting to scrape: CWY base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://modgoveng.conwy.gov.uk/mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-05 12:12:01.223757","end":"2023-10-05 12:12:03.403789","duration":2}},{"council_id":"EAL","missing":false,"latest_run":{"status_code":1,"log_text":"[12:12:18] Fetching Scraper for: EAL handlers.py:23\n Begin attempting to scrape: EAL handlers.py:27\n Deleting existing data... base.py:239\n[12:12:19] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:12:20] ...data deleted. base.py:246\n Scraping from base.py:42\n http://ealing.cmis.uk.com/ealing/Councillors.aspx \n 404 Client Error: Not Found for url: handlers.py:36\n http://ealing.cmis.uk.com/ealing/Councillors.aspx \n Finished attempting to scrape: EAL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://ealing.cmis.uk.com/ealing/Councillors.aspx\n","start":"2023-10-05 12:12:18.494396","end":"2023-10-05 12:12:20.440534","duration":1}},{"council_id":"ELI","missing":false,"latest_run":{"status_code":1,"log_text":"[13:13:13] Fetching Scraper for: ELI handlers.py:23\n Begin attempting to scrape: ELI handlers.py:27\n Deleting existing data... base.py:239\n[13:13:14] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:13:15] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://democracy.e-lindsey.gov.uk/mgWebService.asmx/GetCo \n uncillorsByWard \n HTTPSConnectionPool(host='democracy.e-lindsey.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: ELI base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democracy.e-lindsey.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democracy.e-lindsey.gov.uk', 
port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-05 13:13:13.462592","end":"2023-10-05 13:13:15.625105","duration":2}},{"council_id":"ERW","missing":false,"latest_run":{"status_code":1,"log_text":"[14:17:12] Fetching Scraper for: ERW handlers.py:23\n Begin attempting to scrape: ERW handlers.py:27\n Deleting existing data... base.py:239\n[14:17:13] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:17:14] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.eastrenfrewshire.gov.uk/Find-my-councillor \n[14:17:15] Scraping from base.py:42\n https://www.eastrenfrewshire.gov.uk/councillor-angela-conv \n ery \n[14:17:16] 'NoneType' object is not subscriptable handlers.py:36\n Finished attempting to scrape: ERW base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 51, in run\n councillor = self.get_single_councillor(councillor_html)\n File \"scrapers/ERW-east-renfrewshire/councillors.py\", line 50, in get_single_councillor\n contact_url = soup.select_one(\".panel__list--relarticles a.panel__link\")[\"href\"]\nTypeError: 'NoneType' object is not subscriptable\n","start":"2023-10-05 14:17:12.577495","end":"2023-10-05 14:17:16.861049","duration":4}},{"council_id":"FYL","missing":false,"latest_run":{"status_code":1,"log_text":"[12:54:03] Fetching Scraper for: FYL handlers.py:23\n Begin attempting to scrape: FYL handlers.py:27\n[12:54:04] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:54:05] ...data deleted. base.py:246\n Scraping from base.py:42\n https://fylde.cmis.uk.com/fylde/CouncillorsandMP.aspx \n[12:54:07] 'title' handlers.py:36\n Finished attempting to scrape: FYL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 51, in run\n councillor = self.get_single_councillor(councillor_html)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 267, in get_single_councillor\n party = self.get_party_name(list_page_html)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 253, in get_party_name\n return list_page_html.find_all(\"img\")[-1][\"title\"].replace(\"(logo)\", \"\").strip()\n File \"/opt/python/bs4/element.py\", line 1573, in __getitem__\n return self.attrs[key]\nKeyError: 'title'\n","start":"2023-10-05 12:54:03.972242","end":"2023-10-05 12:54:07.328992","duration":3}},{"council_id":"HAO","missing":false,"latest_run":{"status_code":1,"log_text":"[13:26:36] Fetching Scraper for: HAO handlers.py:23\n Begin attempting to scrape: HAO handlers.py:27\n Deleting existing data... base.py:239\n[13:26:37] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:26:38] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://cmis.harborough.gov.uk/cmis5/Councillors.aspx \n HTTPSConnectionPool(host='cmis.harborough.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /cmis5/Councillors.aspx (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: HAO base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cmis.harborough.gov.uk', port=443): Max retries exceeded with url: /cmis5/Councillors.aspx (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='cmis.harborough.gov.uk', port=443): Max retries exceeded with url: /cmis5/Councillors.aspx 
(Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-05 13:26:36.439706","end":"2023-10-05 13:26:38.571993","duration":2}},{"council_id":"HER","missing":false,"latest_run":{"status_code":1,"log_text":"[12:54:46] Fetching Scraper for: HER handlers.py:23\n Begin attempting to scrape: HER handlers.py:27\n Deleting existing data... base.py:239\n[12:54:47] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:54:48] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www5.hertsmere.gov.uk/democracy//mgWebService.asmx \n /GetCouncillorsByWard \n 404 Client Error: Not Found for url: handlers.py:36\n https://www5.hertsmere.gov.uk/democracy//mgWebService. \n asmx/GetCouncillorsByWard \n Finished attempting to scrape: HER base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www5.hertsmere.gov.uk/democracy//mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-05 12:54:46.534733","end":"2023-10-05 12:54:48.832620","duration":2}},{"council_id":"HMF","missing":false,"latest_run":{"status_code":1,"log_text":"[14:32:57] Fetching Scraper for: HMF handlers.py:23\n Begin attempting to scrape: HMF handlers.py:27\n Deleting existing data... base.py:239\n[14:32:58] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:32:59] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://democracy.lbhf.gov.uk/mgWebService.asmx/GetCouncill \n orsByWard \n 404 Client Error: Not Found for url: handlers.py:36\n http://democracy.lbhf.gov.uk/mgWebService.asmx/GetCoun \n cillorsByWard \n Finished attempting to scrape: HMF base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://democracy.lbhf.gov.uk/mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-05 14:32:57.405413","end":"2023-10-05 14:32:59.687694","duration":2}},{"council_id":"HNS","missing":false,"latest_run":{"status_code":1,"log_text":"[12:29:40] Fetching Scraper for: HNS handlers.py:23\n Begin attempting to scrape: HNS handlers.py:27\n Deleting existing data... base.py:239\n[12:29:41] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:29:42] ...data deleted. base.py:246\n Scraping from base.py:42\n https://democraticservices.hounslow.gov.uk/mgWebService.as \n mx/GetCouncillorsByWard \n HTTPSConnectionPool(host='democraticservices.hounslow. handlers.py:36\n gov.uk', port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: HNS base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File 
\"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democraticservices.hounslow.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democraticservices.hounslow.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-05 12:29:40.629151","end":"2023-10-05 12:29:42.742927","duration":2}},{"council_id":"HUN","missing":false,"latest_run":{"status_code":1,"log_text":"[12:13:40] Fetching Scraper for: HUN handlers.py:23\n Begin attempting to scrape: HUN handlers.py:27\n Deleting existing data... base.py:239\n[12:13:41] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:13:42] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://applications.huntingdonshire.gov.uk/moderngov//mgWe \n bService.asmx/GetCouncillorsByWard \n 500 Server Error: Internal Server Error for url: handlers.py:36\n http://applications.huntingdonshire.gov.uk/moderngov// \n mgWebService.asmx/GetCouncillorsByWard \n Finished attempting to scrape: HUN base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http://applications.huntingdonshire.gov.uk/moderngov//mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-05 12:13:40.527144","end":"2023-10-05 12:13:42.636738","duration":2}},{"council_id":"KEC","missing":false,"latest_run":{"status_code":1,"log_text":"[12:12:23] Fetching Scraper for: KEC handlers.py:23\n Begin attempting to scrape: KEC handlers.py:27\n Deleting existing data... base.py:239\n[12:12:25] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:12:26] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.rbkc.gov.uk/committees/Councillors.aspx \n[12:12:28] 404 Client Error: Not Found for url: handlers.py:36\n https://rbkc.moderngov.co.uk/Committees/mgError.aspx \n[12:12:29] Finished attempting to scrape: KEC base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://rbkc.moderngov.co.uk/Committees/mgError.aspx\n","start":"2023-10-05 12:12:23.185679","end":"2023-10-05 12:12:29.026513","duration":5}},{"council_id":"LEE","missing":false,"latest_run":{"status_code":1,"log_text":"[12:04:38] Fetching Scraper for: LEE handlers.py:23\n Begin attempting to scrape: LEE handlers.py:27\n Deleting existing data... base.py:239\n[12:04:39] Getting all files in Councillors... base.py:191\n Getting all files in Councillors/json... base.py:191\n ...found 40 files in Councillors/json base.py:207\n Getting all files in Councillors/raw... base.py:191\n[12:04:40] ...found 40 files in Councillors/raw base.py:207\n ...found 81 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 81 files base.py:216\n ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://democracy.lewes-eastbourne.gov.uk//mgWebService.asm \n x/GetCouncillorsByWard \n[12:04:46] argument of type 'NoneType' is not iterable handlers.py:36\n Committing batch 1 consisting of 80 files base.py:274\n[12:04:47] Finished attempting to scrape: LEE base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 183, in run\n councillor = self.get_single_councillor(ward, councillor_xml)\n File \"scrapers/LEE-lewes/councillors.py\", line 13, in get_single_councillor\n if \"lewes.gov.uk\" in email:\nTypeError: argument of type 'NoneType' is not iterable\n","start":"2023-10-05 12:04:38.633461","end":"2023-10-05 12:04:47.449974","duration":8}},{"council_id":"MIK","missing":false,"latest_run":{"status_code":1,"log_text":"[14:01:10] Fetching Scraper for: MIK handlers.py:23\n Begin attempting to scrape: MIK handlers.py:27\n Deleting existing data... base.py:239\n[14:01:11] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:01:12] ...data deleted. base.py:246\n Scraping from base.py:42\n http://milton-keynes.cmis.uk.com/milton-keynes/Councillors \n .aspx \n 404 Client Error: Not Found for url: handlers.py:36\n http://milton-keynes.cmis.uk.com/milton-keynes/Council \n lors.aspx \n Finished attempting to scrape: MIK base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://milton-keynes.cmis.uk.com/milton-keynes/Councillors.aspx\n","start":"2023-10-05 14:01:10.279084","end":"2023-10-05 14:01:12.648287","duration":2}},{"council_id":"MOL","missing":false,"latest_run":{"status_code":1,"log_text":"[14:25:07] Fetching Scraper for: MOL handlers.py:23\n Begin attempting to scrape: MOL handlers.py:27\n Deleting existing data... base.py:239\n[14:25:08] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:25:09] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://www.molevalley.gov.uk/home/council/councillors/who \n -are-your-councillors \n HTTPSConnectionPool(host='www.molevalley.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /home/council/councillors/who-are-your-councillors \n (Caused by SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: MOL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='www.molevalley.gov.uk', port=443): Max retries exceeded with url: /home/council/councillors/who-are-your-councillors (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in 
send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='www.molevalley.gov.uk', port=443): Max retries exceeded with url: /home/council/councillors/who-are-your-councillors (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-05 14:25:07.446841","end":"2023-10-05 14:25:09.685208","duration":2}},{"council_id":"NEL","missing":false,"latest_run":{"status_code":1,"log_text":"[13:01:50] Fetching Scraper for: NEL handlers.py:23\n[13:01:51] Begin attempting to scrape: NEL handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:01:52] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.nelincs.gov.uk/your-council/councillors-mps-an \n d-meps/find-your-councillor/councillors-by-party/ \n[13:01:56] More than one element selected handlers.py:36\n Finished attempting to scrape: NEL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 144, in get_list_container\n raise ValueError(\"More than one element selected\")\nValueError: More than one element selected\n","start":"2023-10-05 13:01:50.962557","end":"2023-10-05 13:01:56.716780","duration":5}},{"council_id":"NNO","missing":false,"latest_run":{"status_code":1,"log_text":"[12:29:22] Fetching Scraper for: NNO handlers.py:23\n Begin attempting to scrape: NNO handlers.py:27\n[12:29:23] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:29:24] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.north-norfolk.gov.uk/members/#filter-form \n[12:29:26] list index out of range handlers.py:36\n Finished attempting to scrape: NNO base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"scrapers/NNO-north-norfolk/councillors.py\", line 15, in get_councillors\n return super().get_councillors()[1:]\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-05 12:29:22.801348","end":"2023-10-05 12:29:26.856409","duration":4}},{"council_id":"OAD","missing":false,"latest_run":{"status_code":1,"log_text":"[13:21:23] Fetching Scraper for: OAD handlers.py:23\n Begin attempting to scrape: OAD handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... 
base.py:191\n[13:21:24] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:21:25] ...data deleted. base.py:246\n Scraping from base.py:42\n http://moderngov.oadby-wigston.gov.uk/mgWebService.asmx/Ge \n tCouncillorsByWard \n ('Connection aborted.', ConnectionResetError(104, handlers.py:36\n 'Connection reset by peer')) \n Finished attempting to scrape: OAD base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 466, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 3, in raise_from\n File \"/opt/python/urllib3/connectionpool.py\", line 461, in _make_request\n httplib_response = conn.getresponse()\n File \"/var/lang/lib/python3.8/http/client.py\", line 1348, in getresponse\n response.begin()\n File \"/var/lang/lib/python3.8/http/client.py\", line 316, in begin\n version, status, reason = self._read_status()\n File \"/var/lang/lib/python3.8/http/client.py\", line 277, in _read_status\n line = str(self.fp.readline(_MAXLINE + 1), \"iso-8859-1\")\n File \"/var/lang/lib/python3.8/socket.py\", line 669, in readinto\n return self._sock.recv_into(b)\nConnectionResetError: [Errno 104] Connection reset by peer\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 550, in increment\n raise six.reraise(type(error), error, _stacktrace)\n File \"/opt/python/urllib3/packages/six.py\", line 769, in reraise\n raise value.with_traceback(tb)\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 466, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 3, in raise_from\n File \"/opt/python/urllib3/connectionpool.py\", line 461, in _make_request\n httplib_response = conn.getresponse()\n File \"/var/lang/lib/python3.8/http/client.py\", line 1348, in getresponse\n response.begin()\n File \"/var/lang/lib/python3.8/http/client.py\", line 316, in begin\n version, status, reason = self._read_status()\n File \"/var/lang/lib/python3.8/http/client.py\", line 277, in _read_status\n line = str(self.fp.readline(_MAXLINE + 1), \"iso-8859-1\")\n File \"/var/lang/lib/python3.8/socket.py\", line 669, in readinto\n return self._sock.recv_into(b)\nurllib3.exceptions.ProtocolError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File 
\"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 501, in send\n raise ConnectionError(err, request=request)\nrequests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))\n","start":"2023-10-05 13:21:23.161182","end":"2023-10-05 13:21:25.741504","duration":2}},{"council_id":"ORK","missing":false,"latest_run":{"status_code":1,"log_text":"[12:22:47] Fetching Scraper for: ORK handlers.py:23\n Begin attempting to scrape: ORK handlers.py:27\n Deleting existing data... base.py:239\n[12:22:48] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:22:49] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.orkney.gov.uk/Council/Councillors/councillor-p \n rofiles.htm \n 404 Client Error: Not Found for url: handlers.py:36\n https://www.orkney.gov.uk/Council/Councillors/councill \n or-profiles.htm \n Finished attempting to scrape: ORK base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www.orkney.gov.uk/Council/Councillors/councillor-profiles.htm\n","start":"2023-10-05 12:22:47.292557","end":"2023-10-05 12:22:49.734331","duration":2}},{"council_id":"PEN","missing":false,"latest_run":{"status_code":1,"log_text":"[13:51:54] Fetching Scraper for: PEN handlers.py:23\n Begin attempting to scrape: PEN handlers.py:27\n[13:51:55] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n Getting all files in Councillors/json... base.py:191\n[13:51:56] ...found 15 files in Councillors/json base.py:207\n Getting all files in Councillors/raw... base.py:191\n ...found 15 files in Councillors/raw base.py:207\n ...found 31 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 31 files base.py:216\n[13:51:57] ...data deleted. 
base.py:246\n Scraping from https://www.pendle.gov.uk/councillors/name base.py:42\n[13:51:58] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/76/mohammed_adnan \n[13:51:59] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/83/faraz_ahmad \n[13:52:00] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/2/nadeem_ahmed \n[13:52:01] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/91/sajjad_ahmed \n Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/78/david_albin \n[13:52:02] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/67/zafar_ali \n[13:52:03] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/88/mohammad_ammer \n[13:52:04] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/84/ruby_anwar \n Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/8/naeem_hussain_ashr \n af \n[13:52:05] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/94/mohammad_aslam \n[13:52:06] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/12/neil_butterworth \n[13:52:07] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/13/rosemary_e_carrol \n l \n Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/85/chris_church \n[13:52:08] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/75/david_cockburn-pr \n ice \n[13:52:10] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/64/sarah_cockburn-pr \n ice \n[13:52:11] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/92/david_gallear \n[13:52:12] 'NoneType' object is not subscriptable handlers.py:36\n Committing batch 1 consisting of 30 files base.py:274\n[13:52:13] Finished attempting to scrape: PEN base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 51, in run\n councillor = self.get_single_councillor(councillor_html)\n File \"scrapers/PEN-pendle/councillors.py\", line 48, in get_single_councillor\n councillor.email = soup.select_one(\"li a[href^=mailto]\")[\"href\"].replace(\nTypeError: 'NoneType' object is not subscriptable\n","start":"2023-10-05 13:51:54.850409","end":"2023-10-05 13:52:13.605607","duration":18}},{"council_id":"ROS","missing":false,"latest_run":{"status_code":1,"log_text":"[12:05:07] Fetching Scraper for: ROS handlers.py:23\n Begin attempting to scrape: ROS handlers.py:27\n Deleting existing data... base.py:239\n[12:05:08] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:05:09] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://www.rossendale.gov.uk/councillors/name \n 404 Client Error: Not Found for url: handlers.py:36\n https://www.rossendale.gov.uk/councillors/name \n Finished attempting to scrape: ROS base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www.rossendale.gov.uk/councillors/name\n","start":"2023-10-05 12:05:07.273619","end":"2023-10-05 12:05:09.563135","duration":2}},{"council_id":"SFT","missing":false,"latest_run":{"status_code":1,"log_text":"[13:28:51] Fetching Scraper for: SFT handlers.py:23\n Begin attempting to scrape: SFT handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:28:52] ...data deleted. base.py:246\n Scraping from base.py:42\n http://modgov.sefton.gov.uk/mgWebService.asmx/GetCouncillo \n rsByWard \n HTTPSConnectionPool(host='modgov.sefton.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n[13:28:53] Finished attempting to scrape: SFT base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File 
\"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='modgov.sefton.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in \n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n resp = self.send(\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='modgov.sefton.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-05 13:28:51.008078","end":"2023-10-05 13:28:53.213430","duration":2}},{"council_id":"SHA","missing":false,"latest_run":{"status_code":1,"log_text":"[15:02:47] Fetching Scraper for: SHA handlers.py:23\n Begin attempting to scrape: SHA handlers.py:27\n Deleting existing data... base.py:239\n[15:02:48] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[15:02:49] ...data deleted. 
base.py:246\n Scraping from https://www.southhams.gov.uk/councillorsSH base.py:42\n[15:02:50] list index out of range handlers.py:36\n Finished attempting to scrape: SHA base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-05 15:02:47.361303","end":"2023-10-05 15:02:50.727124","duration":3}},{"council_id":"SHE","missing":false,"latest_run":{"status_code":null,"log_text":"[11:28:20] Fetching Scraper for: SHE handlers.py:22\n Begin attempting to scrape: SHE handlers.py:25\n Deleting existing data... base.py:234\n Getting all files in SHE... base.py:186\n[11:28:21] Getting all files in SHE/json... base.py:186\n ...found 30 files in SHE/json base.py:202\n Getting all files in SHE/raw... base.py:186\n ...found 30 files in SHE/raw base.py:202\n ...found 61 files in SHE base.py:202\n Deleting batch no. 1 consisting of 61 files base.py:211\n[11:28:32] An error occurred (ThrottlingException) when calling handlers.py:34\n the CreateCommit operation (reached max retries: 4): \n Rate exceeded \n Finished attempting to scrape: SHE base.py:319\n","errors":"An error occurred (ThrottlingException) when calling the CreateCommit operation (reached max retries: 4): Rate exceeded","start":"2022-04-04 11:28:20.509898","end":"2022-04-04 11:28:32.871624","duration":12}},{"council_id":"SHN","missing":false,"latest_run":{"status_code":1,"log_text":"[14:11:31] Fetching Scraper for: SHN handlers.py:23\n[14:11:32] Begin attempting to scrape: SHN handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[14:11:33] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://moderngov.sthelens.gov.uk/mgWebService.asmx/GetCoun \n cillorsByWard \n[14:11:36] HTTPConnectionPool(host='moderngov.sthelens.gov.uk', handlers.py:36\n port=80): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno 113] No route to host')) \n[14:11:38] Finished attempting to scrape: SHN base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/opt/python/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nOSError: [Errno 113] No route to host\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 415, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/opt/python/urllib3/connection.py\", line 244, in request\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1256, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1302, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1251, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1011, in _send_output\n self.send(msg)\n File \"/var/lang/lib/python3.8/http/client.py\", line 951, in send\n self.connect()\n File \"/opt/python/urllib3/connection.py\", line 205, in connect\n conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 113] No route to host\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='moderngov.sthelens.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File 
\"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='moderngov.sthelens.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))\n","start":"2023-10-05 14:11:31.988796","end":"2023-10-05 14:11:38.497411","duration":6}},{"council_id":"SNO","missing":false,"latest_run":{"status_code":1,"log_text":"[12:27:01] Fetching Scraper for: SNO handlers.py:23\n Begin attempting to scrape: SNO handlers.py:27\n Deleting existing data... base.py:239\n[12:27:02] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:27:03] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.southnorfolkandbroadland.gov.uk/directory/3/so \n uth-norfolk-councillor-directory/category/11 \n 404 Client Error: Not Found for url: handlers.py:36\n https://www.southnorfolkandbroadland.gov.uk/directory/ \n 3/south-norfolk-councillor-directory/category/11 \n Finished attempting to scrape: SNO base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www.southnorfolkandbroadland.gov.uk/directory/3/south-norfolk-councillor-directory/category/11\n","start":"2023-10-05 12:27:01.419038","end":"2023-10-05 12:27:03.755634","duration":2}},{"council_id":"SST","missing":false,"latest_run":{"status_code":1,"log_text":"[14:22:13] Fetching Scraper for: SST handlers.py:23\n Begin attempting to scrape: SST handlers.py:27\n[14:22:14] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:22:15] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://services.sstaffs.gov.uk/cmis/Councillors.aspx \n 404 Client Error: Not Found for url: handlers.py:36\n https://services.sstaffs.gov.uk/cmis/Councillors.aspx \n Finished attempting to scrape: SST base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://services.sstaffs.gov.uk/cmis/Councillors.aspx\n","start":"2023-10-05 14:22:13.796792","end":"2023-10-05 14:22:15.843399","duration":2}},{"council_id":"STG","missing":false,"latest_run":{"status_code":1,"log_text":"[12:59:37] Fetching Scraper for: STG handlers.py:23\n Begin attempting to scrape: STG handlers.py:27\n Deleting existing data... base.py:239\n[12:59:38] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:59:39] ...data deleted. base.py:246\n Scraping from https://www.stirling.gov.uk/councillors base.py:42\n[12:59:40] list index out of range handlers.py:36\n[12:59:41] Finished attempting to scrape: STG base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-05 12:59:37.184538","end":"2023-10-05 12:59:41.074441","duration":3}},{"council_id":"TES","missing":false,"latest_run":{"status_code":1,"log_text":"[12:29:45] Fetching Scraper for: TES handlers.py:23\n Begin attempting to scrape: TES handlers.py:27\n Deleting existing data... base.py:239\n[12:29:46] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:29:47] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://testvalley.cmis.uk.com/testvalleypublic/ElectedRepr \n esentatives/tabid/63/ScreenMode/Alphabetical/Default.aspx# \n MemberSectionA \n 404 Client Error: Not Found for url: handlers.py:36\n http://testvalley.cmis.uk.com/testvalleypublic/Elected \n Representatives/tabid/63/ScreenMode/Alphabetical/Defau \n lt.aspx#MemberSectionA \n Finished attempting to scrape: TES base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://testvalley.cmis.uk.com/testvalleypublic/ElectedRepresentatives/tabid/63/ScreenMode/Alphabetical/Default.aspx#MemberSectionA\n","start":"2023-10-05 12:29:45.667883","end":"2023-10-05 12:29:47.836564","duration":2}},{"council_id":"THE","missing":false,"latest_run":{"status_code":1,"log_text":"[12:20:39] Fetching Scraper for: THE handlers.py:23\n Begin attempting to scrape: THE handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[12:20:40] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.threerivers.gov.uk/listing/councillors \n[12:20:42] 'NoneType' object has no attribute 'findNext' handlers.py:36\n Finished attempting to scrape: THE base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"scrapers/THE-three-rivers/councillors.py\", line 13, in get_list_container\n return soup.find(\"h3\", text=\"District Councillor\").findNext(\"ul\")\nAttributeError: 'NoneType' object has no attribute 'findNext'\n","start":"2023-10-05 12:20:39.036121","end":"2023-10-05 12:20:42.689029","duration":3}},{"council_id":"TWH","missing":false,"latest_run":{"status_code":1,"log_text":"[12:11:20] Fetching Scraper for: TWH handlers.py:23\n Begin attempting to scrape: TWH handlers.py:27\n[12:11:21] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:11:22] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://democracy.towerhamlets.gov.uk/mgWebService.asmx/Get \n CouncillorsByWard \n HTTPSConnectionPool(host='democracy.towerhamlets.gov.u handlers.py:36\n k', port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n[12:11:23] Finished attempting to scrape: TWH base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democracy.towerhamlets.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in <listcomp>\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n 
resp = self.send(\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democracy.towerhamlets.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-05 12:11:20.884394","end":"2023-10-05 12:11:23.128339","duration":2}},{"council_id":"WRT","missing":false,"latest_run":{"status_code":1,"log_text":"[12:29:30] Fetching Scraper for: WRT handlers.py:23\n Begin attempting to scrape: WRT handlers.py:27\n[12:29:31] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:29:32] ...data deleted. base.py:246\n Scraping from https://www.warrington.gov.uk/councillors base.py:42\n[12:29:35] More than one element selected handlers.py:36\n Finished attempting to scrape: WRT base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 144, in get_list_container\n raise ValueError(\"More than one element selected\")\nValueError: More than one element selected\n","start":"2023-10-05 12:29:30.894215","end":"2023-10-05 12:29:35.773933","duration":4}}] +[{"council_id":"AGB","missing":false,"latest_run":{"status_code":1,"log_text":"[13:03:26] Fetching Scraper for: AGB handlers.py:23\n Begin attempting to scrape: AGB handlers.py:27\n[13:03:27] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:03:28] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.argyll-bute.gov.uk/councillor_list \n[13:03:31] list index out of range handlers.py:36\n Finished attempting to scrape: AGB base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-06 13:03:26.814551","end":"2023-10-06 13:03:31.568306","duration":4}},{"council_id":"BRX","missing":false,"latest_run":{"status_code":1,"log_text":"[14:31:21] Fetching Scraper for: BRX handlers.py:23\n Begin attempting to scrape: BRX handlers.py:27\n Deleting existing data... base.py:239\n[14:31:22] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:31:23] ...data deleted. 
base.py:246\n Scraping from https://www.broxbourne.gov.uk/councillors base.py:42\n[14:31:24] list index out of range handlers.py:36\n Finished attempting to scrape: BRX base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-06 14:31:21.584349","end":"2023-10-06 14:31:24.386980","duration":2}},{"council_id":"BRY","missing":false,"latest_run":{"status_code":1,"log_text":"[12:10:50] Fetching Scraper for: BRY handlers.py:23\n Begin attempting to scrape: BRY handlers.py:27\n Deleting existing data... base.py:239\n[12:10:51] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:10:52] ...data deleted. base.py:246\n Scraping from base.py:42\n http://cds.bromley.gov.uk/mgWebService.asmx/GetCouncillors \n ByWard \n[12:13:02] HTTPConnectionPool(host='cds.bromley.gov.uk', handlers.py:36\n port=80): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno 110] Connection timed out')) \n Finished attempting to scrape: BRY base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/opt/python/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 415, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/opt/python/urllib3/connection.py\", line 244, in request\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1256, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1302, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1251, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1011, in _send_output\n self.send(msg)\n File \"/var/lang/lib/python3.8/http/client.py\", line 951, in send\n self.connect()\n File \"/opt/python/urllib3/connection.py\", line 205, in connect\n conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File 
\"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='cds.bromley.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='cds.bromley.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n","start":"2023-10-06 12:10:50.247729","end":"2023-10-06 12:13:02.762861","duration":132}},{"council_id":"CAY","missing":false,"latest_run":{"status_code":1,"log_text":"[14:08:37] Fetching Scraper for: CAY handlers.py:23\n Begin attempting to scrape: CAY handlers.py:27\n Deleting existing data... base.py:239\n[14:08:38] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:08:39] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://www.democracy.caerphilly.gov.uk/mgWebService.asmx/G \n etCouncillorsByWard \n HTTPSConnectionPool(host='www.democracy.caerphilly.gov handlers.py:36\n .uk', port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(CertificateError(\"hostname \n 'www.democracy.caerphilly.gov.uk' doesn't match either \n of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\"))) \n Finished attempting to scrape: CAY base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 472, in connect\n _match_hostname(cert, self.assert_hostname or server_hostname)\n File \"/opt/python/urllib3/connection.py\", line 545, in _match_hostname\n match_hostname(cert, asserted_hostname)\n File \"/opt/python/urllib3/util/ssl_match_hostname.py\", line 150, in match_hostname\n raise CertificateError(\nurllib3.util.ssl_match_hostname.CertificateError: hostname 'www.democracy.caerphilly.gov.uk' doesn't match either of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='www.democracy.caerphilly.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(CertificateError(\"hostname 'www.democracy.caerphilly.gov.uk' doesn't match either of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\")))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in <listcomp>\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n resp = self.send(\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: 
HTTPSConnectionPool(host='www.democracy.caerphilly.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(CertificateError(\"hostname 'www.democracy.caerphilly.gov.uk' doesn't match either of '*.caerphilly.gov.uk', 'caerphilly.gov.uk'\")))\n","start":"2023-10-06 14:08:37.057185","end":"2023-10-06 14:08:39.698175","duration":2}},{"council_id":"CMD","missing":false,"latest_run":{"status_code":1,"log_text":"[12:34:24] Fetching Scraper for: CMD handlers.py:23\n Begin attempting to scrape: CMD handlers.py:27\n Deleting existing data... base.py:239\n[12:34:25] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:34:26] ...data deleted. base.py:246\n Scraping from base.py:42\n http://democracy.camden.gov.uk/mgWebService.asmx/GetCounci \n llorsByWard \n HTTPSConnectionPool(host='democracy.camden.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: CMD base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democracy.camden.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File 
\"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in \n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n resp = self.send(\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democracy.camden.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-06 12:34:24.463246","end":"2023-10-06 12:34:26.755669","duration":2}},{"council_id":"COT","missing":false,"latest_run":{"status_code":1,"log_text":"[12:59:54] Fetching Scraper for: COT handlers.py:23\n Begin attempting to scrape: COT handlers.py:27\n Deleting existing data... base.py:239\n[12:59:55] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:59:56] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://www.cmis.cotswold.gov.uk/cmis5/People/tabid/62/Scre \n enMode/Alphabetical/Default.aspx \n HTTPConnectionPool(host='www.cmis.cotswold.gov.uk', handlers.py:36\n port=80): Max retries exceeded with url: \n /cmis5/People/tabid/62/ScreenMode/Alphabetical/Default \n .aspx (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno -2] Name or service not known')) \n Finished attempting to scrape: COT base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 72, in create_connection\n for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):\n File \"/var/lang/lib/python3.8/socket.py\", line 918, in getaddrinfo\n for res in _socket.getaddrinfo(host, port, family, type, proto, flags):\nsocket.gaierror: [Errno -2] Name or service not known\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 415, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/opt/python/urllib3/connection.py\", line 244, in request\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1256, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1302, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1251, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1011, in _send_output\n self.send(msg)\n File \"/var/lang/lib/python3.8/http/client.py\", line 951, in send\n self.connect()\n File \"/opt/python/urllib3/connection.py\", line 205, in connect\n conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno -2] Name or service not known\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='www.cmis.cotswold.gov.uk', port=80): Max retries exceeded with url: /cmis5/People/tabid/62/ScreenMode/Alphabetical/Default.aspx (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -2] Name or service not known'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = 
self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='www.cmis.cotswold.gov.uk', port=80): Max retries exceeded with url: /cmis5/People/tabid/62/ScreenMode/Alphabetical/Default.aspx (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -2] Name or service not known'))\n","start":"2023-10-06 12:59:54.681074","end":"2023-10-06 12:59:56.772371","duration":2}},{"council_id":"CWY","missing":false,"latest_run":{"status_code":1,"log_text":"[14:36:05] Fetching Scraper for: CWY handlers.py:23\n Begin attempting to scrape: CWY handlers.py:27\n Deleting existing data... base.py:239\n[14:36:06] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:36:07] ...data deleted. base.py:246\n Scraping from base.py:42\n http://modgoveng.conwy.gov.uk/mgWebService.asmx/GetCouncil \n lorsByWard \n 404 Client Error: Not Found for url: handlers.py:36\n http://modgoveng.conwy.gov.uk/mgWebService.asmx/GetCou \n ncillorsByWard \n Finished attempting to scrape: CWY base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://modgoveng.conwy.gov.uk/mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-06 14:36:05.316208","end":"2023-10-06 14:36:07.615762","duration":2}},{"council_id":"EAL","missing":false,"latest_run":{"status_code":1,"log_text":"[12:47:39] Fetching Scraper for: EAL handlers.py:23\n Begin attempting to scrape: EAL handlers.py:27\n[12:47:40] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:47:41] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://ealing.cmis.uk.com/ealing/Councillors.aspx \n 404 Client Error: Not Found for url: handlers.py:36\n http://ealing.cmis.uk.com/ealing/Councillors.aspx \n Finished attempting to scrape: EAL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://ealing.cmis.uk.com/ealing/Councillors.aspx\n","start":"2023-10-06 12:47:39.854134","end":"2023-10-06 12:47:41.795675","duration":1}},{"council_id":"ELI","missing":false,"latest_run":{"status_code":1,"log_text":"[14:20:43] Fetching Scraper for: ELI handlers.py:23\n Begin attempting to scrape: ELI handlers.py:27\n[14:20:44] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:20:45] ...data deleted. base.py:246\n Scraping from base.py:42\n https://democracy.e-lindsey.gov.uk/mgWebService.asmx/GetCo \n uncillorsByWard \n HTTPSConnectionPool(host='democracy.e-lindsey.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: ELI base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise 
MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democracy.e-lindsey.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democracy.e-lindsey.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-06 14:20:43.721844","end":"2023-10-06 14:20:45.974786","duration":2}},{"council_id":"ERW","missing":false,"latest_run":{"status_code":1,"log_text":"[14:05:11] Fetching Scraper for: ERW handlers.py:23\n Begin attempting to scrape: ERW handlers.py:27\n Deleting existing data... base.py:239\n[14:05:12] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:05:13] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.eastrenfrewshire.gov.uk/Find-my-councillor \n[14:05:15] Scraping from base.py:42\n https://www.eastrenfrewshire.gov.uk/councillor-angela-conv \n ery \n[14:05:16] 'NoneType' object is not subscriptable handlers.py:36\n Finished attempting to scrape: ERW base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 51, in run\n councillor = self.get_single_councillor(councillor_html)\n File \"scrapers/ERW-east-renfrewshire/councillors.py\", line 50, in get_single_councillor\n contact_url = soup.select_one(\".panel__list--relarticles a.panel__link\")[\"href\"]\nTypeError: 'NoneType' object is not subscriptable\n","start":"2023-10-06 14:05:11.279421","end":"2023-10-06 14:05:16.763636","duration":5}},{"council_id":"FYL","missing":false,"latest_run":{"status_code":1,"log_text":"[14:06:07] Fetching Scraper for: FYL handlers.py:23\n Begin attempting to scrape: FYL handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[14:06:08] ...found 1 files in Councillors base.py:207\n Deleting batch no. 
1 consisting of 1 files base.py:216\n ...data deleted. base.py:246\n Scraping from base.py:42\n https://fylde.cmis.uk.com/fylde/CouncillorsandMP.aspx \n[14:06:10] 'title' handlers.py:36\n Finished attempting to scrape: FYL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 51, in run\n councillor = self.get_single_councillor(councillor_html)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 267, in get_single_councillor\n party = self.get_party_name(list_page_html)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 253, in get_party_name\n return list_page_html.find_all(\"img\")[-1][\"title\"].replace(\"(logo)\", \"\").strip()\n File \"/opt/python/bs4/element.py\", line 1573, in __getitem__\n return self.attrs[key]\nKeyError: 'title'\n","start":"2023-10-06 14:06:07.187672","end":"2023-10-06 14:06:10.289976","duration":3}},{"council_id":"HAO","missing":false,"latest_run":{"status_code":1,"log_text":"[13:55:12] Fetching Scraper for: HAO handlers.py:23\n Begin attempting to scrape: HAO handlers.py:27\n Deleting existing data... base.py:239\n[13:55:13] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:55:14] ...data deleted. base.py:246\n Scraping from base.py:42\n https://cmis.harborough.gov.uk/cmis5/Councillors.aspx \n HTTPSConnectionPool(host='cmis.harborough.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /cmis5/Councillors.aspx (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: HAO base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: 
HTTPSConnectionPool(host='cmis.harborough.gov.uk', port=443): Max retries exceeded with url: /cmis5/Councillors.aspx (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='cmis.harborough.gov.uk', port=443): Max retries exceeded with url: /cmis5/Councillors.aspx (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-06 13:55:12.596934","end":"2023-10-06 13:55:14.947815","duration":2}},{"council_id":"HER","missing":false,"latest_run":{"status_code":1,"log_text":"[14:31:27] Fetching Scraper for: HER handlers.py:23\n Begin attempting to scrape: HER handlers.py:27\n[14:31:28] Deleting existing data... base.py:239\n[14:31:30] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:31:31] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www5.hertsmere.gov.uk/democracy//mgWebService.asmx \n /GetCouncillorsByWard \n 404 Client Error: Not Found for url: handlers.py:36\n https://www5.hertsmere.gov.uk/democracy//mgWebService. \n asmx/GetCouncillorsByWard \n Finished attempting to scrape: HER base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www5.hertsmere.gov.uk/democracy//mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-06 14:31:27.981714","end":"2023-10-06 14:31:31.457720","duration":3}},{"council_id":"HMF","missing":false,"latest_run":{"status_code":1,"log_text":"[13:31:24] Fetching Scraper for: HMF handlers.py:23\n Begin attempting to scrape: HMF handlers.py:27\n Deleting existing data... 
base.py:239\n[13:31:25] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. base.py:246\n Scraping from base.py:42\n http://democracy.lbhf.gov.uk/mgWebService.asmx/GetCouncill \n orsByWard \n 404 Client Error: Not Found for url: handlers.py:36\n http://democracy.lbhf.gov.uk/mgWebService.asmx/GetCoun \n cillorsByWard \n[13:31:26] Finished attempting to scrape: HMF base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://democracy.lbhf.gov.uk/mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-06 13:31:24.206953","end":"2023-10-06 13:31:26.241316","duration":2}},{"council_id":"HNS","missing":false,"latest_run":{"status_code":1,"log_text":"[12:47:13] Fetching Scraper for: HNS handlers.py:23\n Begin attempting to scrape: HNS handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[12:47:14] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. base.py:246\n Scraping from base.py:42\n https://democraticservices.hounslow.gov.uk/mgWebService.as \n mx/GetCouncillorsByWard \n[12:47:15] HTTPSConnectionPool(host='democraticservices.hounslow. 
handlers.py:36\n gov.uk', port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: HNS base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democraticservices.hounslow.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democraticservices.hounslow.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: 
CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-06 12:47:13.110259","end":"2023-10-06 12:47:15.336098","duration":2}},{"council_id":"HUN","missing":false,"latest_run":{"status_code":1,"log_text":"[12:51:12] Fetching Scraper for: HUN handlers.py:23\n Begin attempting to scrape: HUN handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[12:51:13] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. base.py:246\n Scraping from base.py:42\n http://applications.huntingdonshire.gov.uk/moderngov//mgWe \n bService.asmx/GetCouncillorsByWard \n[12:51:14] 500 Server Error: Internal Server Error for url: handlers.py:36\n http://applications.huntingdonshire.gov.uk/moderngov// \n mgWebService.asmx/GetCouncillorsByWard \n Finished attempting to scrape: HUN base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http://applications.huntingdonshire.gov.uk/moderngov//mgWebService.asmx/GetCouncillorsByWard\n","start":"2023-10-06 12:51:12.115281","end":"2023-10-06 12:51:14.143941","duration":2}},{"council_id":"KEC","missing":false,"latest_run":{"status_code":1,"log_text":"[12:59:18] Fetching Scraper for: KEC handlers.py:23\n Begin attempting to scrape: KEC handlers.py:27\n Deleting existing data... base.py:239\n[12:59:19] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:59:20] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://www.rbkc.gov.uk/committees/Councillors.aspx \n[12:59:21] 404 Client Error: Not Found for url: handlers.py:36\n https://rbkc.moderngov.co.uk/Committees/mgError.aspx \n Finished attempting to scrape: KEC base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://rbkc.moderngov.co.uk/Committees/mgError.aspx\n","start":"2023-10-06 12:59:18.401677","end":"2023-10-06 12:59:21.387466","duration":2}},{"council_id":"LEE","missing":false,"latest_run":{"status_code":1,"log_text":"[12:28:02] Fetching Scraper for: LEE handlers.py:23\n Begin attempting to scrape: LEE handlers.py:27\n[12:28:03] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n Getting all files in Councillors/json... base.py:191\n[12:28:04] ...found 40 files in Councillors/json base.py:207\n Getting all files in Councillors/raw... base.py:191\n ...found 40 files in Councillors/raw base.py:207\n ...found 81 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 81 files base.py:216\n[12:28:05] ...data deleted. base.py:246\n Scraping from base.py:42\n http://democracy.lewes-eastbourne.gov.uk//mgWebService.asm \n x/GetCouncillorsByWard \n[12:30:19] argument of type 'NoneType' is not iterable handlers.py:36\n Committing batch 1 consisting of 80 files base.py:274\n[12:30:20] Finished attempting to scrape: LEE base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 183, in run\n councillor = self.get_single_councillor(ward, councillor_xml)\n File \"scrapers/LEE-lewes/councillors.py\", line 13, in get_single_councillor\n if \"lewes.gov.uk\" in email:\nTypeError: argument of type 'NoneType' is not iterable\n","start":"2023-10-06 12:28:02.965631","end":"2023-10-06 12:30:20.694863","duration":137}},{"council_id":"MIK","missing":false,"latest_run":{"status_code":1,"log_text":"[13:49:50] Fetching Scraper for: MIK handlers.py:23\n Begin attempting to scrape: MIK handlers.py:27\n[13:49:51] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:49:52] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://milton-keynes.cmis.uk.com/milton-keynes/Councillors \n .aspx \n 404 Client Error: Not Found for url: handlers.py:36\n http://milton-keynes.cmis.uk.com/milton-keynes/Council \n lors.aspx \n[13:49:53] Finished attempting to scrape: MIK base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://milton-keynes.cmis.uk.com/milton-keynes/Councillors.aspx\n","start":"2023-10-06 13:49:50.855827","end":"2023-10-06 13:49:53.007518","duration":2}},{"council_id":"MOL","missing":false,"latest_run":{"status_code":1,"log_text":"[14:17:15] Fetching Scraper for: MOL handlers.py:23\n[14:17:16] Begin attempting to scrape: MOL handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:17:17] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.molevalley.gov.uk/home/council/councillors/who \n -are-your-councillors \n HTTPSConnectionPool(host='www.molevalley.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /home/council/councillors/who-are-your-councillors \n (Caused by SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n[14:17:18] Finished attempting to scrape: MOL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File 
\"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='www.molevalley.gov.uk', port=443): Max retries exceeded with url: /home/council/councillors/who-are-your-councillors (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='www.molevalley.gov.uk', port=443): Max retries exceeded with url: /home/council/councillors/who-are-your-councillors (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-06 14:17:15.969177","end":"2023-10-06 14:17:18.232258","duration":2}},{"council_id":"NEL","missing":false,"latest_run":{"status_code":1,"log_text":"[12:37:53] Fetching Scraper for: NEL handlers.py:23\n Begin attempting to scrape: NEL handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[12:37:54] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://www.nelincs.gov.uk/your-council/councillors-mps-an \n d-meps/find-your-councillor/councillors-by-party/ \n[12:37:57] More than one element selected handlers.py:36\n[12:37:58] Finished attempting to scrape: NEL base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 144, in get_list_container\n raise ValueError(\"More than one element selected\")\nValueError: More than one element selected\n","start":"2023-10-06 12:37:53.114576","end":"2023-10-06 12:37:58.159433","duration":5}},{"council_id":"NNO","missing":false,"latest_run":{"status_code":1,"log_text":"[14:30:17] Fetching Scraper for: NNO handlers.py:23\n Begin attempting to scrape: NNO handlers.py:27\n[14:30:18] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:30:19] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.north-norfolk.gov.uk/members/#filter-form \n[14:30:21] list index out of range handlers.py:36\n Finished attempting to scrape: NNO base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"scrapers/NNO-north-norfolk/councillors.py\", line 15, in get_councillors\n return super().get_councillors()[1:]\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-06 14:30:17.845899","end":"2023-10-06 14:30:21.882066","duration":4}},{"council_id":"OAD","missing":false,"latest_run":{"status_code":1,"log_text":"[14:05:54] Fetching Scraper for: OAD handlers.py:23\n Begin attempting to scrape: OAD handlers.py:27\n[14:05:55] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:05:56] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://moderngov.oadby-wigston.gov.uk/mgWebService.asmx/Ge \n tCouncillorsByWard \n ('Connection aborted.', ConnectionResetError(104, handlers.py:36\n 'Connection reset by peer')) \n[14:05:57] Finished attempting to scrape: OAD base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 466, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 3, in raise_from\n File \"/opt/python/urllib3/connectionpool.py\", line 461, in _make_request\n httplib_response = conn.getresponse()\n File \"/var/lang/lib/python3.8/http/client.py\", line 1348, in getresponse\n response.begin()\n File \"/var/lang/lib/python3.8/http/client.py\", line 316, in begin\n version, status, reason = self._read_status()\n File \"/var/lang/lib/python3.8/http/client.py\", line 277, in _read_status\n line = str(self.fp.readline(_MAXLINE + 1), \"iso-8859-1\")\n File \"/var/lang/lib/python3.8/socket.py\", line 669, in readinto\n return self._sock.recv_into(b)\nConnectionResetError: [Errno 104] Connection reset by peer\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 550, in increment\n raise six.reraise(type(error), error, _stacktrace)\n File \"/opt/python/urllib3/packages/six.py\", line 769, in reraise\n raise value.with_traceback(tb)\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 466, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 3, in raise_from\n File \"/opt/python/urllib3/connectionpool.py\", line 461, in _make_request\n httplib_response = conn.getresponse()\n File \"/var/lang/lib/python3.8/http/client.py\", line 1348, in getresponse\n response.begin()\n File \"/var/lang/lib/python3.8/http/client.py\", line 316, in begin\n version, status, reason = self._read_status()\n File \"/var/lang/lib/python3.8/http/client.py\", line 277, in _read_status\n line = str(self.fp.readline(_MAXLINE + 1), \"iso-8859-1\")\n File \"/var/lang/lib/python3.8/socket.py\", line 669, in readinto\n return self._sock.recv_into(b)\nurllib3.exceptions.ProtocolError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in 
send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 501, in send\n raise ConnectionError(err, request=request)\nrequests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))\n","start":"2023-10-06 14:05:54.715150","end":"2023-10-06 14:05:57.190010","duration":2}},{"council_id":"ORK","missing":false,"latest_run":{"status_code":1,"log_text":"[13:50:08] Fetching Scraper for: ORK handlers.py:23\n Begin attempting to scrape: ORK handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:50:09] ...data deleted. base.py:246\n Scraping from base.py:42\n https://www.orkney.gov.uk/Council/Councillors/councillor-p \n rofiles.htm \n[13:50:10] 404 Client Error: Not Found for url: handlers.py:36\n https://www.orkney.gov.uk/Council/Councillors/councill \n or-profiles.htm \n Finished attempting to scrape: ORK base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www.orkney.gov.uk/Council/Councillors/councillor-profiles.htm\n","start":"2023-10-06 13:50:08.004007","end":"2023-10-06 13:50:10.179470","duration":2}},{"council_id":"PEN","missing":false,"latest_run":{"status_code":1,"log_text":"[12:27:18] Fetching Scraper for: PEN handlers.py:23\n Begin attempting to scrape: PEN handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n Getting all files in Councillors/json... base.py:191\n[12:27:19] ...found 15 files in Councillors/json base.py:207\n Getting all files in Councillors/raw... base.py:191\n ...found 15 files in Councillors/raw base.py:207\n ...found 31 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 31 files base.py:216\n[12:27:20] ...data deleted. 
base.py:246\n Scraping from https://www.pendle.gov.uk/councillors/name base.py:42\n[12:27:22] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/76/mohammed_adnan \n Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/83/faraz_ahmad \n[12:27:23] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/2/nadeem_ahmed \n[12:27:24] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/91/sajjad_ahmed \n[12:27:25] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/78/david_albin \n Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/67/zafar_ali \n[12:27:26] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/88/mohammad_ammer \n[12:27:27] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/84/ruby_anwar \n[12:27:28] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/8/naeem_hussain_ashr \n af \n[12:27:29] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/94/mohammad_aslam \n Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/12/neil_butterworth \n[12:27:30] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/13/rosemary_e_carrol \n l \n[12:27:31] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/85/chris_church \n[12:27:32] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/75/david_cockburn-pr \n ice \n Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/64/sarah_cockburn-pr \n ice \n[12:27:33] Scraping from base.py:42\n https://www.pendle.gov.uk/councillors/92/david_gallear \n[12:27:34] 'NoneType' object is not subscriptable handlers.py:36\n Committing batch 1 consisting of 30 files base.py:274\n[12:27:36] Finished attempting to scrape: PEN base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 51, in run\n councillor = self.get_single_councillor(councillor_html)\n File \"scrapers/PEN-pendle/councillors.py\", line 48, in get_single_councillor\n councillor.email = soup.select_one(\"li a[href^=mailto]\")[\"href\"].replace(\nTypeError: 'NoneType' object is not subscriptable\n","start":"2023-10-06 12:27:18.005223","end":"2023-10-06 12:27:36.130796","duration":18}},{"council_id":"ROS","missing":false,"latest_run":{"status_code":1,"log_text":"[13:56:55] Fetching Scraper for: ROS handlers.py:23\n Begin attempting to scrape: ROS handlers.py:27\n[13:56:56] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:56:57] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://www.rossendale.gov.uk/councillors/name \n 404 Client Error: Not Found for url: handlers.py:36\n https://www.rossendale.gov.uk/councillors/name \n Finished attempting to scrape: ROS base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www.rossendale.gov.uk/councillors/name\n","start":"2023-10-06 13:56:55.833460","end":"2023-10-06 13:56:57.894341","duration":2}},{"council_id":"SFT","missing":false,"latest_run":{"status_code":1,"log_text":"[13:57:31] Fetching Scraper for: SFT handlers.py:23\n Begin attempting to scrape: SFT handlers.py:27\n Deleting existing data... base.py:239\n[13:57:32] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[13:57:33] ...data deleted. base.py:246\n Scraping from base.py:42\n http://modgov.sefton.gov.uk/mgWebService.asmx/GetCouncillo \n rsByWard \n HTTPSConnectionPool(host='modgov.sefton.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n Finished attempting to scrape: SFT base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File 
\"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='modgov.sefton.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in \n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n resp = self.send(\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='modgov.sefton.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-06 13:57:31.486183","end":"2023-10-06 13:57:33.690731","duration":2}},{"council_id":"SHA","missing":false,"latest_run":{"status_code":1,"log_text":"[12:23:09] Fetching Scraper for: SHA handlers.py:23\n Begin attempting to scrape: SHA handlers.py:27\n Deleting existing data... base.py:239\n[12:23:10] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:23:11] ...data deleted. 
base.py:246\n Scraping from https://www.southhams.gov.uk/councillorsSH base.py:42\n[12:23:12] list index out of range handlers.py:36\n Finished attempting to scrape: SHA base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-06 12:23:09.154245","end":"2023-10-06 12:23:12.528500","duration":3}},{"council_id":"SHE","missing":false,"latest_run":{"status_code":null,"log_text":"[11:28:20] Fetching Scraper for: SHE handlers.py:22\n Begin attempting to scrape: SHE handlers.py:25\n Deleting existing data... base.py:234\n Getting all files in SHE... base.py:186\n[11:28:21] Getting all files in SHE/json... base.py:186\n ...found 30 files in SHE/json base.py:202\n Getting all files in SHE/raw... base.py:186\n ...found 30 files in SHE/raw base.py:202\n ...found 61 files in SHE base.py:202\n Deleting batch no. 1 consisting of 61 files base.py:211\n[11:28:32] An error occurred (ThrottlingException) when calling handlers.py:34\n the CreateCommit operation (reached max retries: 4): \n Rate exceeded \n Finished attempting to scrape: SHE base.py:319\n","errors":"An error occurred (ThrottlingException) when calling the CreateCommit operation (reached max retries: 4): Rate exceeded","start":"2022-04-04 11:28:20.509898","end":"2022-04-04 11:28:32.871624","duration":12}},{"council_id":"SHN","missing":false,"latest_run":{"status_code":1,"log_text":"[12:09:15] Fetching Scraper for: SHN handlers.py:23\n Begin attempting to scrape: SHN handlers.py:27\n Deleting existing data... base.py:239\n[12:09:16] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:09:17] ...data deleted. 
base.py:246\n Scraping from base.py:42\n http://moderngov.sthelens.gov.uk/mgWebService.asmx/GetCoun \n cillorsByWard \n[12:09:20] HTTPConnectionPool(host='moderngov.sthelens.gov.uk', handlers.py:36\n port=80): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno 113] No route to host')) \n Finished attempting to scrape: SHN base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/opt/python/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nOSError: [Errno 113] No route to host\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 415, in _make_request\n conn.request(method, url, **httplib_request_kw)\n File \"/opt/python/urllib3/connection.py\", line 244, in request\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1256, in request\n self._send_request(method, url, body, headers, encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1302, in _send_request\n self.endheaders(body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1251, in endheaders\n self._send_output(message_body, encode_chunked=encode_chunked)\n File \"/var/lang/lib/python3.8/http/client.py\", line 1011, in _send_output\n self.send(msg)\n File \"/var/lang/lib/python3.8/http/client.py\", line 951, in send\n self.connect()\n File \"/opt/python/urllib3/connection.py\", line 205, in connect\n conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 113] No route to host\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='moderngov.sthelens.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File 
\"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='moderngov.sthelens.gov.uk', port=80): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 113] No route to host'))\n","start":"2023-10-06 12:09:15.353047","end":"2023-10-06 12:09:20.629757","duration":5}},{"council_id":"SLG","missing":false,"latest_run":{"status_code":1,"log_text":"[13:14:12] Fetching Scraper for: SLG handlers.py:23\n Begin attempting to scrape: SLG handlers.py:27\n[13:14:13] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n Getting all files in Councillors/json... base.py:191\n[13:14:14] ...found 42 files in Councillors/json base.py:207\n Getting all files in Councillors/raw... base.py:191\n ...found 42 files in Councillors/raw base.py:207\n ...found 85 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 85 files base.py:216\n[13:14:15] ...data deleted. base.py:246\n Scraping from base.py:42\n https://democracy.slough.gov.uk/mgWebService.asmx/GetCounc \n illorsByWard \n[13:18:36] HTTPSConnectionPool(host='democracy.slough.gov.uk', handlers.py:36\n port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n NewConnectionError(': Failed to establish a new \n connection: [Errno 110] Connection timed out')) \n Finished attempting to scrape: SLG base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connection.py\", line 174, in _new_conn\n conn = connection.create_connection(\n File \"/opt/python/urllib3/util/connection.py\", line 95, in create_connection\n raise err\n File \"/opt/python/urllib3/util/connection.py\", line 85, in create_connection\n sock.connect(sa)\nTimeoutError: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 363, in connect\n self.sock = conn = self._new_conn()\n File \"/opt/python/urllib3/connection.py\", line 186, in _new_conn\n raise NewConnectionError(\nurllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 110] Connection timed out\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democracy.slough.gov.uk', port=443): Max retries exceeded with url: 
/mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 519, in send\n raise ConnectionError(e, request=request)\nrequests.exceptions.ConnectionError: HTTPSConnectionPool(host='democracy.slough.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 110] Connection timed out'))\n","start":"2023-10-06 13:14:12.815082","end":"2023-10-06 13:18:36.937122","duration":264}},{"council_id":"SNO","missing":false,"latest_run":{"status_code":1,"log_text":"[14:06:01] Fetching Scraper for: SNO handlers.py:23\n Begin attempting to scrape: SNO handlers.py:27\n[14:06:02] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[14:06:03] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://www.southnorfolkandbroadland.gov.uk/directory/3/so \n uth-norfolk-councillor-directory/category/11 \n 404 Client Error: Not Found for url: handlers.py:36\n https://www.southnorfolkandbroadland.gov.uk/directory/ \n 3/south-norfolk-councillor-directory/category/11 \n[14:06:04] Finished attempting to scrape: SNO base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 141, in get_list_container\n self.base_url_soup = self.get_page(self.base_url)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 130, in get_page\n page = self.get(url).text\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://www.southnorfolkandbroadland.gov.uk/directory/3/south-norfolk-councillor-directory/category/11\n","start":"2023-10-06 14:06:01.692164","end":"2023-10-06 14:06:04.050873","duration":2}},{"council_id":"SST","missing":false,"latest_run":{"status_code":1,"log_text":"[12:31:08] Fetching Scraper for: SST handlers.py:23\n Begin attempting to scrape: SST handlers.py:27\n Deleting existing data... base.py:239\n[12:31:09] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:31:10] ...data deleted. base.py:246\n Scraping from base.py:42\n https://services.sstaffs.gov.uk/cmis/Councillors.aspx \n 404 Client Error: Not Found for url: handlers.py:36\n https://services.sstaffs.gov.uk/cmis/Councillors.aspx \n Finished attempting to scrape: SST base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://services.sstaffs.gov.uk/cmis/Councillors.aspx\n","start":"2023-10-06 12:31:08.472063","end":"2023-10-06 12:31:10.738790","duration":2}},{"council_id":"STG","missing":false,"latest_run":{"status_code":1,"log_text":"[14:06:13] Fetching Scraper for: STG handlers.py:23\n Begin attempting to scrape: STG handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[14:06:14] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. 
base.py:246\n Scraping from https://www.stirling.gov.uk/councillors base.py:42\n[14:06:17] list index out of range handlers.py:36\n Finished attempting to scrape: STG base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 145, in get_list_container\n return selected[0]\nIndexError: list index out of range\n","start":"2023-10-06 14:06:13.065893","end":"2023-10-06 14:06:17.290602","duration":4}},{"council_id":"TES","missing":false,"latest_run":{"status_code":1,"log_text":"[13:28:45] Fetching Scraper for: TES handlers.py:23\n Begin attempting to scrape: TES handlers.py:27\n Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n[13:28:46] ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n ...data deleted. base.py:246\n Scraping from base.py:42\n http://testvalley.cmis.uk.com/testvalleypublic/ElectedRepr \n esentatives/tabid/63/ScreenMode/Alphabetical/Default.aspx# \n MemberSectionA \n[13:28:47] 404 Client Error: Not Found for url: handlers.py:36\n http://testvalley.cmis.uk.com/testvalleypublic/Elected \n Representatives/tabid/63/ScreenMode/Alphabetical/Defau \n lt.aspx#MemberSectionA \n Finished attempting to scrape: TES base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 248, in get_councillors\n req = self.get(self.base_url, extra_headers=self.extra_headers)\n File \"/var/task/lgsf/scrapers/base.py\", line 49, in get\n response.raise_for_status()\n File \"/opt/python/requests/models.py\", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\nrequests.exceptions.HTTPError: 404 Client Error: Not Found for url: http://testvalley.cmis.uk.com/testvalleypublic/ElectedRepresentatives/tabid/63/ScreenMode/Alphabetical/Default.aspx#MemberSectionA\n","start":"2023-10-06 13:28:45.093407","end":"2023-10-06 13:28:47.194628","duration":2}},{"council_id":"THE","missing":false,"latest_run":{"status_code":1,"log_text":"[12:44:42] Fetching Scraper for: THE handlers.py:23\n Begin attempting to scrape: THE handlers.py:27\n Deleting existing data... base.py:239\n[12:44:43] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:44:44] ...data deleted. 
base.py:246\n Scraping from base.py:42\n https://www.threerivers.gov.uk/listing/councillors \n[12:44:46] 'NoneType' object has no attribute 'findNext' handlers.py:36\n Finished attempting to scrape: THE base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"scrapers/THE-three-rivers/councillors.py\", line 13, in get_list_container\n return soup.find(\"h3\", text=\"District Councillor\").findNext(\"ul\")\nAttributeError: 'NoneType' object has no attribute 'findNext'\n","start":"2023-10-06 12:44:42.498061","end":"2023-10-06 12:44:46.341417","duration":3}},{"council_id":"TWH","missing":false,"latest_run":{"status_code":1,"log_text":"[12:52:06] Fetching Scraper for: TWH handlers.py:23\n Begin attempting to scrape: TWH handlers.py:27\n[12:52:07] Deleting existing data... base.py:239\n Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:52:08] ...data deleted. base.py:246\n Scraping from base.py:42\n http://democracy.towerhamlets.gov.uk/mgWebService.asmx/Get \n CouncillorsByWard \n HTTPSConnectionPool(host='democracy.towerhamlets.gov.u handlers.py:36\n k', port=443): Max retries exceeded with url: \n /mgWebService.asmx/GetCouncillorsByWard (Caused by \n SSLError(SSLCertVerificationError(1, '[SSL: \n CERTIFICATE_VERIFY_FAILED] certificate verify failed: \n unable to get local issuer certificate \n (_ssl.c:1131)'))) \n[12:52:09] Finished attempting to scrape: TWH base.py:324\n","errors":"Traceback (most recent call last):\n File \"/opt/python/urllib3/connectionpool.py\", line 714, in urlopen\n httplib_response = self._make_request(\n File \"/opt/python/urllib3/connectionpool.py\", line 403, in _make_request\n self._validate_conn(conn)\n File \"/opt/python/urllib3/connectionpool.py\", line 1053, in _validate_conn\n conn.connect()\n File \"/opt/python/urllib3/connection.py\", line 419, in connect\n self.sock = ssl_wrap_socket(\n File \"/opt/python/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket\n ssl_sock = _ssl_wrap_socket_impl(\n File \"/opt/python/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl\n return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n File \"/var/lang/lib/python3.8/ssl.py\", line 500, in wrap_socket\n return self.sslsocket_class._create(\n File \"/var/lang/lib/python3.8/ssl.py\", line 1040, in _create\n self.do_handshake()\n File \"/var/lang/lib/python3.8/ssl.py\", line 1309, in do_handshake\n self._sslobj.do_handshake()\nssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/opt/python/requests/adapters.py\", line 486, in send\n resp = conn.urlopen(\n File \"/opt/python/urllib3/connectionpool.py\", line 798, in urlopen\n retries = retries.increment(\n File \"/opt/python/urllib3/util/retry.py\", line 592, in increment\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\nurllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='democracy.towerhamlets.gov.uk', port=443): Max 
retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 179, in run\n wards = self.get_councillors()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 196, in get_councillors\n req = self.get(self.format_councillor_api_url(), verify=self.verify_requests)\n File \"/var/task/lgsf/scrapers/base.py\", line 48, in get\n response = self.requests_session.get(url, headers=headers, verify=verify)\n File \"/opt/python/requests/sessions.py\", line 602, in get\n return self.request(\"GET\", url, **kwargs)\n File \"/opt/python/requests/sessions.py\", line 589, in request\n resp = self.send(prep, **send_kwargs)\n File \"/opt/python/requests/sessions.py\", line 725, in send\n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 725, in \n history = [resp for resp in gen]\n File \"/opt/python/requests/sessions.py\", line 266, in resolve_redirects\n resp = self.send(\n File \"/opt/python/requests/sessions.py\", line 703, in send\n r = adapter.send(request, **kwargs)\n File \"/opt/python/requests/adapters.py\", line 517, in send\n raise SSLError(e, request=request)\nrequests.exceptions.SSLError: HTTPSConnectionPool(host='democracy.towerhamlets.gov.uk', port=443): Max retries exceeded with url: /mgWebService.asmx/GetCouncillorsByWard (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1131)')))\n","start":"2023-10-06 12:52:06.754799","end":"2023-10-06 12:52:09.023681","duration":2}},{"council_id":"WRT","missing":false,"latest_run":{"status_code":1,"log_text":"[12:13:28] Fetching Scraper for: WRT handlers.py:23\n Begin attempting to scrape: WRT handlers.py:27\n Deleting existing data... base.py:239\n[12:13:29] Getting all files in Councillors... base.py:191\n ...found 1 files in Councillors base.py:207\n Deleting batch no. 1 consisting of 1 files base.py:216\n[12:13:30] ...data deleted. base.py:246\n Scraping from https://www.warrington.gov.uk/councillors base.py:42\n[12:13:32] More than one element selected handlers.py:36\n[12:13:33] Finished attempting to scrape: WRT base.py:324\n","errors":"Traceback (most recent call last):\n File \"/var/task/lgsf/aws_lambda/handlers.py\", line 32, in scraper_worker_handler\n scraper.run(run_log)\n File \"/var/task/lgsf/councillors/scrapers.py\", line 49, in run\n for councillor_html in self.get_councillors():\n File \"/var/task/lgsf/councillors/scrapers.py\", line 148, in get_councillors\n container = self.get_list_container()\n File \"/var/task/lgsf/councillors/scrapers.py\", line 144, in get_list_container\n raise ValueError(\"More than one element selected\")\nValueError: More than one element selected\n","start":"2023-10-06 12:13:28.351552","end":"2023-10-06 12:13:33.144637","duration":4}}] diff --git a/index.html b/index.html index 5e3d9a04ef..4b0fb32ffa 100644 --- a/index.html +++ b/index.html @@ -49,10 +49,6 @@

[index.html hunks: the page markup was stripped in extraction, leaving only bare "+"/"-" markers, hunk headers, and the repeated "All log runs" heading. Each hunk swaps the most recent run entry under a council's "All log runs" list. The only status changes that survive, consistent with failing.json above, are BRT flipping from ERROR to OK and SLG flipping from OK to ERROR.]
diff --git a/logbooks/ABC/index.html b/logbooks/ABC/index.html
index 16e43e6d5e..7531020345 100644
--- a/logbooks/ABC/index.html
+++ b/logbooks/ABC/index.html
@@ -37,6 +37,56 @@
+

+2023-10-06

+
+Duration     6 seconds
+Start        2023-10-06 12:09:49.944297
+End          2023-10-06 12:09:56.602153
+Status code  0
+Error
+

+Run log
+
+[12:09:49] Fetching Scraper for: ABC                              handlers.py:23
+           Begin attempting to scrape: ABC                        handlers.py:27
+[12:09:50] Deleting existing data...                                 base.py:239
+           Getting all files in Councillors...                       base.py:191
+           Getting all files in Councillors/json...                  base.py:191
+[12:09:51] ...found 16 files in Councillors/json                     base.py:207
+           Getting all files in Councillors/raw...                   base.py:191
+           ...found 16 files in Councillors/raw                      base.py:207
+           ...found 33 files in Councillors                          base.py:207
+           Deleting batch no. 1 consisting of 33 files               base.py:216
+[12:09:52] ...data deleted.                                          base.py:246
+           Scraping from                                              base.py:42
+           https://www.armaghbanbridgecraigavon.gov.uk/councillors/             
+[12:09:55] Committing batch 1 consisting of 32 files                 base.py:274
+[12:09:56] Finished attempting to scrape: ABC                        base.py:324
+

2023-10-05

@@ -984,56 +1034,6 @@

Run log

-
-2023-09-16
-
-
-Duration     6 seconds
-Start        2023-09-16 13:53:49.779752
-End          2023-09-16 13:53:56.415655
-Status code  0
-Error
-

-Run log
-
[13:53:49] Fetching Scraper for: ABC                              handlers.py:23
-           Begin attempting to scrape: ABC                        handlers.py:27
-[13:53:50] Deleting existing data...                                 base.py:239
-           Getting all files in Councillors...                       base.py:191
-           Getting all files in Councillors/json...                  base.py:191
-[13:53:51] ...found 16 files in Councillors/json                     base.py:207
-           Getting all files in Councillors/raw...                   base.py:191
-           ...found 16 files in Councillors/raw                      base.py:207
-           ...found 33 files in Councillors                          base.py:207
-           Deleting batch no. 1 consisting of 33 files               base.py:216
-           ...data deleted.                                          base.py:246
-           Scraping from                                              base.py:42
-           https://www.armaghbanbridgecraigavon.gov.uk/councillors/             
-[13:53:55] Committing batch 1 consisting of 32 files                 base.py:274
-[13:53:56] Finished attempting to scrape: ABC                        base.py:324
-