lambda_s3updater.py (forked from garadox/s3yum-updater)
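"""Keep yum repository metadata in an S3 bucket up to date from AWS Lambda.

The handler is triggered by S3 object-created / object-removed notifications:
when an RPM is uploaded or deleted, the existing repodata is pulled from the
bucket, the package list is adjusted, and the regenerated metadata is synced
back under repodata/. Relies on the Python 2 yum/createrepo stack and boto.
"""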
import urlparse
import logging
import sys
import tempfile
import createrepo
import yum
import boto
import os
import shutil

from rpmUtils.miscutils import splitFilename

# Logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Hack so urlparse treats s3:// urls like http:// urls when joining paths
urlparse.uses_relative.append('s3')
urlparse.uses_netloc.append('s3')

# Also send log output to stdout
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
class LoggerCallback(object):
    """Callback handed to createrepo so its output goes through logging."""

    def errorlog(self, message):
        logging.error(message)

    def log(self, message):
        message = message.strip()
        if message:
            logging.info(message)
class S3Grabber(object):
    """Minimal grabber that lets yum/createrepo read and write s3:// urls."""

    def __init__(self, baseurl):
        base = urlparse.urlsplit(baseurl)
        self.baseurl = baseurl
        self.basepath = base.path.lstrip('/')
        self.bucket = boto.connect_s3().get_bucket(base.netloc)

    def _getkey(self, url):
        if url.startswith(self.baseurl):
            url = url[len(self.baseurl):].lstrip('/')
        key = self.bucket.get_key(os.path.join(self.basepath, url))
        if not key:
            raise createrepo.grabber.URLGrabError(14, '%s not found' % url)
        return key

    def urlgrab(self, url, filename, **kwargs):
        key = self._getkey(url)
        logging.debug('downloading: %s', key.name)
        key.get_contents_to_filename(filename)
        return filename

    def urldelete(self, url):
        key = self._getkey(url)
        logging.debug('removing: %s', key.name)
        key.delete()

    def syncdir(self, dir, url):
        """Copy all files in dir to url, removing any existing keys."""
        base = os.path.join(self.basepath, url)
        existing_keys = list(self.bucket.list(base))
        new_keys = []
        for filename in sorted(os.listdir(dir)):
            key = self.bucket.new_key(os.path.join(base, filename))
            key.set_contents_from_filename(os.path.join(dir, filename))
            new_keys.append(key.name)
            logging.debug('uploading: %s', key.name)
        for key in existing_keys:
            if key.name not in new_keys:
                logging.debug('removing: %s', key.name)
                key.delete()
def update_repodata(bucketName, key, operation):
    """Add or remove one RPM (identified by its S3 key) in the repo metadata."""
    # Split the S3 key into the repo prefix and the rpm file name
    if key.rfind("/") > -1:
        fileName = key[key.rfind("/") + 1:]
        repoPath = key[:key.rfind("/")]
    else:
        fileName = key
        repoPath = ""
    (name, version, release, epoch, arch) = splitFilename(fileName)
    logger.debug("fileName={0}".format(fileName))
    logger.debug("repoPath={0}".format(repoPath))

    tmpdir = tempfile.mkdtemp()
    s3base = urlparse.urlunsplit(("s3", bucketName, repoPath, "", ""))
    s3grabber = S3Grabber(s3base)

    # Set up a temporary repo that fetches its repodata from s3
    yumbase = yum.YumBase()
    yumbase.preconf.disabled_plugins = '*'
    yumbase.conf.cachedir = os.path.join(tmpdir, 'cache')
    yumbase.repos.disableRepo('*')
    repo = yumbase.add_enable_repo('s3')
    repo._grab = s3grabber
    repo._urls = [os.path.join(s3base, '')]
    # Ensure that a missing base path doesn't cause trouble
    repo._sack = yum.sqlitesack.YumSqlitePackageSack(
        createrepo.readMetadata.CreaterepoPkgOld)

    # Create the metadata generator
    mdconf = createrepo.MetaDataConfig()
    mdconf.directory = tmpdir
    mdconf.pkglist = yum.packageSack.MetaSack()
    mdgen = createrepo.MetaDataGenerator(mdconf, LoggerCallback())
    mdgen.tempdir = tmpdir
    mdgen._grabber = s3grabber

    new_packages = yum.packageSack.PackageSack()
    if operation == "add":
        # Combine the existing package sack with the newly uploaded rpm
        newpkg = mdgen.read_in_package(os.path.join(s3base, fileName))
        newpkg._baseurl = ''  # don't leave s3 base urls in primary metadata
        new_packages.addPackage(newpkg)
    else:
        # Drop the deleted package (matched by name, version and release)
        logger.debug("Delete package {0}".format(key))
        older_pkgs = yumbase.pkgSack.searchNevra(name=name)
        for older in older_pkgs:
            if older.version == version and older.release == release:
                yumbase.pkgSack.delPackage(older)

    mdconf.pkglist.addSack('existing', yumbase.pkgSack)
    mdconf.pkglist.addSack('new', new_packages)

    # Write out the new metadata to tmpdir
    mdgen.doPkgMetadata()
    mdgen.doRepoMetadata()
    mdgen.doFinalMove()

    # Replace the metadata on s3
    s3grabber.syncdir(os.path.join(tmpdir, 'repodata'), 'repodata')

    shutil.rmtree(tmpdir)
def handle(event, context):
    """Lambda entry point: dispatch S3 create/remove notifications."""
    record = event['Records'][0]
    eventType = record['eventName']
    s3Elem = record['s3']
    bucketName = s3Elem['bucket']['name']
    key = s3Elem['object']['key']
    logger.debug("Got Event {0}:{1}/{2}".format(eventType, bucketName, key))
    if "Created" in eventType:
        update_repodata(bucketName, key, "add")
    elif "Removed" in eventType and "Marker" not in eventType:
        update_repodata(bucketName, key, "remove")
    else:
        logger.error("Ignoring EventType {0}".format(eventType))
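

# --- Illustrative sketch, not part of the original file ---
# A minimal local driver showing the event shape handle() expects from an S3
# notification (Records[0].eventName, .s3.bucket.name, .s3.object.key).
# The bucket and key below are hypothetical placeholders; running this hits
# S3 for real, so it needs boto credentials and an existing bucket/rpm key.
if __name__ == '__main__':
    logger.setLevel(logging.DEBUG)
    sample_event = {
        'Records': [{
            'eventName': 'ObjectCreated:Put',
            's3': {
                'bucket': {'name': 'example-yum-bucket'},
                'object': {'key': 'centos/7/x86_64/example-1.0-1.x86_64.rpm'},
            },
        }],
    }
    handle(sample_event, None)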