import configparser
import bs4
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from urllib.parse import urlparse, urlunparse, urljoin
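
# Overview: spider() walks every page on the target host reachable from the
# configured start URL, recording the links found on each page; main() then
# reports every link that answered with an HTTP error, together with the
# pages that reference it.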


def spider(target, exclude):
    """Crawl `target`, skipping hrefs that contain any substring in `exclude`."""
    parsed_target = urlparse(target)
    return spider_rec(dict(), target, parsed_target, exclude)
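
# Example of the return shape (hypothetical host): spider("https://example.com",
# ["/logout"]) yields a dict mapping each crawled path, e.g. "/" or "/about",
# either to the list of hrefs found on that page or to the HTTPError the
# server answered with.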


def spider_rec(page_links, current_href, base_parse, exclude):
    # Resolve the href against the site root and rebuild the absolute URL.
    target_url = urlunparse(base_parse)
    parse_result = urlparse(urljoin(target_url, current_href))
    req = Request(urlunparse(parse_result))
    # Pages are keyed by path plus query string, e.g. "/about?page=2".
    postfix = parse_result.path
    if parse_result.query:
        postfix += "?" + parse_result.query
    if len(postfix) == 0:
        postfix = "/"
    try:
        html_page = urlopen(req)
        # Only parse pages on the target host; off-site links are merely fetched.
        if parse_result.hostname == base_parse.hostname:
            page_links[postfix] = []
            soup = bs4.BeautifulSoup(html_page, "lxml")
            for link in soup.find_all('a'):
                href = link.get('href')
                if href is None:  # anchor tags without an href attribute
                    continue
                href = href.replace(" ", "%20")
                if "mailto:" not in href:
                    # Normalise same-host links to path (+ query) form.
                    if not urlparse(href).hostname:
                        href_parse = urlparse(urljoin(target_url, href))
                        href = href_parse.path
                        if href_parse.query:
                            href += "?" + href_parse.query
                    if href not in page_links[postfix]:
                        page_links[postfix].append(href)
                        # Crawl the link only if no other page has seen it yet.
                        found = False
                        for key in page_links.keys() - {postfix}:
                            if href == key:
                                found = True
                                break
                            # Broken pages are stored as HTTPError, not lists.
                            if isinstance(page_links[key], HTTPError):
                                continue
                            if href in page_links[key]:
                                found = True
                                break
                        if not found:
                            if any(d in href for d in exclude):
                                continue
                            spider_rec(page_links, href, base_parse, exclude)
    except HTTPError as e:
        # Record client errors (400, 404-499) against the page that failed.
        if e.code == 400 or e.code in range(404, 500):
            if parse_result.hostname == base_parse.hostname:
                page_links[postfix] = e
            else:
                page_links[current_href] = e
    return page_links
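
# Note: spider_rec() recurses once per newly discovered page, so a site with
# more than ~1000 reachable pages can exceed Python's default recursion limit
# (see sys.getrecursionlimit()).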


def main():
    print("Reading conf...")
    # crawl.conf is expected next to this script; see the sample layout below.
    config = configparser.ConfigParser()
    config.read('crawl.conf')
    config = config['Config']
    target = config['site']
    ignores = config['ignore'].split(', ')
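    # A minimal sketch of the assumed crawl.conf layout, inferred from the
    # keys read above ('site' and a comma-separated 'ignore'); the values
    # shown are placeholders:
    #
    #   [Config]
    #   site = https://example.com
    #   ignore = /logout, /admin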
print("Crawling site...")
pages = spider(target, ignores)
print(f"Crawled {len(pages)} pages.")
testedLinks = []
for key in pages.keys():
testedLinks += [key]
if type(pages[key]) != HTTPError:
testedLinks += pages[key]
testedLinks = list(set(testedLinks))
print(f"Tested {len(testedLinks)} links.")
count = 0
for link in pages.keys():
if type(pages[link]) == HTTPError:
count += 1
found = []
for search_link in pages.keys():
if type(pages[search_link]) != HTTPError:
for href in pages[search_link]:
if href == link:
found.append(search_link)
print('\n' + ''.join(['='] * 100))
print(link, pages[link].status, pages[link].reason)
print(''.join(['-'] * 100))
print("Found in:")
for href in found:
print(href)
print(''.join(['='] * 100))
print("Done.")
main()