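"""Daily price scraper for 123wow.vn.

Downloads the site's category pages, extracts product names and prices into a
dated CSV file, then compresses the day's CSV and HTML output into zip archives.
"""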
import sys
import os
import glob
from zipfile import ZipFile
import time
import datetime
import schedule
import re
import csv
import random
import logging
from rich.logging import RichHandler
from rich.progress import track
from urllib.request import urlopen
from bs4 import BeautifulSoup
# Parameters
SITE_NAME = "123wow"
BASE_URL = "https://123wow.vn/"
PROJECT_PATH = re.sub("/py$", "", os.getcwd())
PATH_HTML = PROJECT_PATH + "/html/" + SITE_NAME + "/"
PATH_CSV = PROJECT_PATH + "/csv/" + SITE_NAME + "/"
PATH_LOG = PROJECT_PATH + "/log/"
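# Note: PROJECT_PATH drops a trailing "/py" from the working directory, so the
# script is expected to be launched from a py/ subfolder of the project; the
# html/ and csv/ output folders are created under the project root as needed.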
DATE = str(datetime.date.today())
OBSERVATION = 0
# Setting up logging
logging.basicConfig(
    level="INFO",
    format="%(message)s",
    datefmt="[%X]",
    handlers=[RichHandler(rich_tracebacks=True)]
)
log = logging.getLogger("rich")
# Defining main functions
def main():
    try:
        daily_task()
    except Exception as e:
        log.exception('Got exception, scraper stopped')
        log.info(type(e).__name__ + str(e))
    # Compress data and html files
    compress_csv()
    compress_html()
    log.info('Finished. Hibernating until next day...')
def daily_task():
    """Main workhorse function. Support functions defined below"""
    global CATEGORIES_PAGES
    global DATE
    global OBSERVATION
    log.info('Scraper started')
    # Refresh date
    DATE = str(datetime.date.today())
    OBSERVATION = 0
    # Download topsite and get categories directories
    base_file_name = "All_cat_" + DATE + ".html"
    fetch_html(BASE_URL, base_file_name, PATH_HTML, attempts_limit=1000)
    html_file = open(PATH_HTML + base_file_name).read()
    CATEGORIES_PAGES = get_category_list(html_file)
    log.info(f"Found {len(CATEGORIES_PAGES)} categories")
    # Read each category page and scrape it for data
    for cat in track(CATEGORIES_PAGES,
                     description="[green]Scraping...",
                     total=len(CATEGORIES_PAGES)):
        cat_file = "cat_" + cat['name'] + "_" + DATE + ".html"
        download = fetch_html(cat['directlink'], cat_file, PATH_HTML)
        if download:
            scrap_data(cat)
            find_next_page(cat)
def fetch_html(url, file_name, path, attempts_limit=5):
    """Fetch a page and save the html under the given path and file name"""
    if not os.path.exists(path):
        os.makedirs(path)
    if not os.path.isfile(path + file_name):
        attempts = 0
        while attempts < attempts_limit:
            try:
                con = urlopen(url, timeout=5)
                html_content = con.read()
                with open(path + file_name, "wb") as f:
                    f.write(html_content)
                con.close()
                log.debug(f"Downloaded: {file_name}")
                return True
            except Exception:
                attempts += 1
                log.debug(f"Attempt {attempts} failed for {file_name}")
        else:
            # while-else: runs only when all attempts are exhausted
            log.error(f"Cannot download {file_name}")
            return False
    else:
        log.debug(f"Already downloaded {file_name}")
        return True
def get_category_list(top_html):
    """Get the list of relative category directories from the top page"""
    page_list = []
    toppage_soup = BeautifulSoup(top_html, "lxml")
    categories = toppage_soup.find('ul', {'id': 'sample-menu-1'})
    categories = categories.findAll('li')
    categories_tag = [cat.findAll('a') for cat in categories]
    categories_tag = [item for sublist in categories_tag for item in sublist]
    for cat in categories_tag:
        page = {}
        link = re.sub(".+123wow\\.vn/", "", cat['href'])
        page['relativelink'] = link
        page['directlink'] = BASE_URL + link + '?limit=100'
        page['name'] = re.sub("/|\\?.=", "_", link)
        page['label'] = cat.text
        page_list.append(page)
    # Remove duplicates
    page_list = [dict(t) for t in set(tuple(i.items()) for i in page_list)]
    return page_list
def scrap_data(cat):
    """Get item data from a category page and write it to csv"""
    global OBSERVATION
    cat_file = open(PATH_HTML + "cat_" + cat['name'] + "_" +
                    DATE + ".html").read()
    cat_soup = BeautifulSoup(cat_file, "lxml")
    cat_div = cat_soup.findAll("div", {"class": "product-inner clearfix"})
    if cat_div is None:
        cat_div = []
    for item in cat_div:
        row = {}
        good_name = item.find('div', {"class": "name"})
        row['good_name'] = good_name.text if good_name else None
        price = item.find('span', {"class": "price-new"})
        if price is None:
            price = item.find('div', {'class': 'price'})
        row['price'] = price.text.strip() if price else None
        old_price = item.find('span', {"class": "price-old"})
        row['old_price'] = old_price.text if old_price else None
        id1 = good_name.find("a") if good_name else None
        row['id'] = id1.get('href') if id1 else None
        row['category'] = cat['name']
        row['category_label'] = cat['label']
        row['date'] = DATE
        OBSERVATION += 1
        write_data(row)
def find_next_page(cat):
    """Find the next page button and queue that page for scraping"""
    cat_file = open(PATH_HTML + "cat_" + cat['name'] + "_" +
                    DATE + ".html").read()
    cat_soup = BeautifulSoup(cat_file, "lxml")
    pagination = cat_soup.find('div', {'class': 'pagination'})
    if pagination:
        pagination_a = pagination.findAll('a')
        pagination_text = [p.text for p in pagination_a]
        if '>' in pagination_text:
            next_button = pagination_a[pagination_text.index('>')]
        else:
            next_button = None
    else:
        next_button = None
    if next_button:
        link = re.sub(".+123wow\\.vn", "", next_button['href'])
        if link not in [i['relativelink'] for i in CATEGORIES_PAGES]:
            next_page = cat.copy()
            next_page['relativelink'] = link
            next_page['directlink'] = BASE_URL + link
            next_page['name'] = cat['name']
            CATEGORIES_PAGES.append(next_page)
def write_data(item_data):
    """Write one item's data as a row in the csv. Create a new file if needed"""
    fieldnames = ['good_name', 'price', 'old_price', 'id',
                  'category', 'category_label', 'date']
    file_exists = os.path.isfile(PATH_CSV + SITE_NAME + "_" + DATE + ".csv")
    if not os.path.exists(PATH_CSV):
        os.makedirs(PATH_CSV)
    with open(PATH_CSV + SITE_NAME + "_" + DATE + ".csv", "a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames)
        if not file_exists:
            writer.writeheader()
        writer.writerow(item_data)
def compress_csv():
    """Compress downloaded .csv files"""
    if not os.path.exists(PATH_CSV):
        os.makedirs(PATH_CSV)
    os.chdir(PATH_CSV)
    try:
        with ZipFile(SITE_NAME + '_' + DATE + '_csv.zip', 'a') as zip_csv:
            for file in glob.glob("*" + DATE + "*" + "csv"):
                zip_csv.write(file)
                os.remove(file)
        log.info(f"Compressing {OBSERVATION} item(s)")
    except Exception as e:
        log.error('Error when compressing csv')
        log.info(type(e).__name__ + str(e))
    os.chdir(PROJECT_PATH)
def compress_html():
    """Compress downloaded .html files"""
    if not os.path.exists(PATH_HTML):
        os.makedirs(PATH_HTML)
    os.chdir(PATH_HTML)
    try:
        with ZipFile(SITE_NAME + '_' + DATE + '_html.zip', 'a') as zip_html:
            for file in glob.glob("*" + DATE + "*" + "html"):
                zip_html.write(file)
                os.remove(file)
        log.info("Compressing HTML files")
    except Exception as e:
        log.error('Error when compressing html')
        log.info(type(e).__name__ + str(e))
    os.chdir(PROJECT_PATH)
# Run once and exit if called with 'test'; with 'run', scrape now and then
# repeat daily on a schedule; with no argument, only schedule the daily run
if "test" in sys.argv:
    main()
else:
    if "run" in sys.argv:
        main()
    start_time = '01:' + str(random.randint(0, 59)).zfill(2)
    schedule.every().day.at(start_time).do(main)
    while True:
        schedule.run_pending()
        time.sleep(1)
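# Example invocations (from the project's py/ directory):
#   python prices_123wow.py test   # one immediate scrape, then exit
#   python prices_123wow.py run    # scrape now, then repeat daily around 01:00
#   python prices_123wow.py        # wait and scrape daily around 01:00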