Skip to content

Commit

Permalink
Merge pull request #102 from allipatev/refresh-sync-arch-vol-ftp
Browse files Browse the repository at this point in the history
Refresh "sync arch vol ftp" article
  • Loading branch information
b-mihai authored Dec 5, 2024
2 parents 12af9d4 + d86d633 commit a4adfbb
Show file tree
Hide file tree
Showing 3 changed files with 213 additions and 20 deletions.
4 changes: 2 additions & 2 deletions Database-Features/sql-keywords-reserved-vs-non-reserved.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,13 @@ What I want to point out in this article is that keywords come in two different
Luckily there is a system table called `EXA_SQL_KEYWORDS` that contains that list and it obviously fits with the installed version.


```ruby
```sql
SELECT * FROM EXA_SQL_KEYWORDS;
```
Notice the column named `RESERVED`. If that value is false, you are allowed to use that keyword as a regular identifier. Otherwise quoting is mandatory. Let's try this out.


```ruby
```sql
SELECT * FROM VALUES ('MON'), ('TUE'), ('WED'), ('THU'), ('FRI'), ('SAT'), ('SUN') AS DAYS("DAY");
```
Here we use the reserved keyword "DAY" as an identifier. Try that statement in your SQL client to see the effect. You need to enclose the keyword in double quotes for the query to succeed.
Expand Down
169 changes: 169 additions & 0 deletions Environment-Management/attachments/backup_copy_ftp.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,169 @@
-- Connection to the local (source) archive volume.
-- The three %s placeholders are filled at runtime by the UDF with
-- user, password and the node's own private-network IP.
CREATE CONNECTION BACKUPSYNC_LOCAL_CONN
TO 'ftp://%s:%s@%s/v0002'
USER 'admin'
IDENTIFIED BY 'admin';

-- Connection to the remote (target) archive volume.
-- NOTE(review): credentials are stored in plain text in these connection
-- objects -- restrict access rights on them accordingly.
CREATE CONNECTION BACKUPSYNC_REMOTE_CONN
TO 'ftp://%s:%s@%s/LocalArchiveVolume1'
USER 'admin'
IDENTIFIED BY 'aX1234567';

-- Working schema in which the sync UDF is created.
CREATE SCHEMA TEST;
OPEN SCHEMA TEST;

--
-- UDF to incrementally copy all local backups to a remote system
--
-- To use it, simply set the correct access URLs and IP addresses to
-- the remote nodes, create the UDF and call it in following SQL:
--
-- SQL_EXA> SELECT syncBackups(IPROC, '<Comma-separated list of remote DB IP addresses>') FROM EXA_LOADAVG;
--
-- The copy process runs completely parallel, distributed over all
-- nodes and only files missing in the remote system are copied, so
-- this UDF can be called on a regular basis. Backup files which are removed
-- from the source will also be removed in the remote system.
--
--/

CREATE OR REPLACE PYTHON3 SCALAR SCRIPT syncBackups(iproc INT, remote_nodes VARCHAR(2000000)) EMITS (iproc INT, lnum INT, line VARCHAR(2000000)) AS
import sys
from socket import inet_ntoa, AF_INET, SOCK_DGRAM, socket
from fcntl import ioctl
from struct import pack
from ftplib import FTP_TLS
from io import StringIO
from urllib.parse import urlparse

class _ftp_writer(object):
def __init__(self, ftp, fpath): self._fpath = fpath; self._ftp = ftp
def __enter__(self):
self._ftp.voidcmd('TYPE I')
self._conn = self._ftp.transfercmd('STOR %s' % self._fpath)
self.length = 0
return self
def __exit__(self, type, value, traceback):
self._conn.close(); self._ftp.voidresp()
def write(self, data):
if data is not None:
self._conn.sendall(data)
self.length += len(data)

class ArchiveVolume:
    """Wrapper around one archive volume reached over FTPS (ftplib.FTP_TLS).

    Connects with TLS on both control and data channels (prot_p), verifies
    the volume name appears in the server's root NLST listing, and exposes
    listing/copy/delete helpers over the volume's
    ``dbname/backupid/level/nodeid`` directory hierarchy.
    """

    def __init__(self, host, port, user, pwd, vol):
        # Keep connection parameters for reference only.
        self._conn = (host, port, user, pwd)
        self._vol = vol
        self._ftp = FTP_TLS()
        self._ftp.connect(host, port)
        self._ftp.login(user, pwd)
        self._ftp.prot_p()
        # The requested volume must be visible in the server root listing.
        volumes = set(); self._ftp.retrlines('NLST', volumes.add)
        if vol not in volumes:
            raise RuntimeError('Volume %s not found' % repr(vol))
        self._ftp.sendcmd('CWD %s' % vol)

    def _ls(self, path):
        """Return sorted entries of *path* inside the volume.

        Entries ending in '.tar.gz' and the literal '~' are filtered out
        (presumably server-side metadata/temporary artifacts -- TODO confirm).
        Raises RuntimeError when CWD into the path is not acknowledged (250).
        """
        status = self._ftp.sendcmd('CWD /%s/%s' % (self._vol, path))
        if not status.startswith('250 '):
            raise RuntimeError('List of path %s failed: %s' % (repr(path), status))
        filelist = []; self._ftp.retrlines('NLST', filelist.append)
        return sorted([fname for fname in filelist if not fname.endswith('.tar.gz') and fname != '~'])

    def goBackup(self, dbname, backupid, level, nodeid):
        # CWD into one specific backup directory.
        self._ftp.sendcmd('CWD /%s/%s' % (self._vol, '/'.join((dbname, backupid, level, nodeid))))
    # Convenience listings at each level of the hierarchy.
    def dbNames(self): return self._ls('')
    def backupIds(self, dbname): return self._ls(dbname)
    def backupLevel(self, dbname, backupid): return self._ls('%s/%s' % (dbname, backupid))[0]
    def nodesList(self, dbname, backupid, level = None):
        """Return (node directory names, level) for one backup id."""
        if level is None: level = self.backupLevel(dbname, backupid)
        # NOTE(review): backupLevel() is recomputed here instead of reusing
        # 'level' -- redundant server round-trip, same result.
        return self._ls('/'.join((dbname, backupid, self.backupLevel(dbname, backupid)))), level
    def filesList(self, dbname, backupid, nodeid, level = None):
        """Return (file names, level) for one node of one backup."""
        if level is None: level = self.backupLevel(dbname, backupid)
        return self._ls('/'.join((dbname, backupid, level, nodeid))), level
    def writeFile(self, fpath): return _ftp_writer(self._ftp, fpath)
    def removeFile(self, dbname, backupid, level, nodeid, fname):
        self._ftp.sendcmd('DELE /%s' % '/'.join((self._vol, dbname, backupid, level, nodeid, fname)))

    def copyFile(self, dst, dbname, backupid, level, nodeid, fname):
        """Copy one backup file from this volume to *dst*, creating the
        directory chain on the destination as needed. Returns bytes copied."""
        pathdata = [dbname, backupid, level, nodeid]
        fpath = []
        while len(pathdata) > 0:
            dat = pathdata.pop(0)
            fpath.append(dat)
            dest = '/'.join(fpath)
            # NOTE(review): this tests whether 'dat' appears inside its OWN
            # directory listing (fpath already includes dat), not inside its
            # parent -- verify the directory-creation logic against a live
            # server before changing it.
            if dat not in dst._ls('/'.join(fpath)):
                dst._ftp.mkd(dest)
        self.goBackup(*fpath)
        dst.goBackup(*fpath)
        fpath.append(fname)
        fpath = '/'.join(fpath)
        # Stream the file binary from source to destination.
        with dst.writeFile(fname) as fd:
            self._ftp.retrbinary('RETR %s' % fname, fd.write)
        return fd.length

def getBackupList(vol):
    """Collect every backup file visible in archive volume *vol*.

    Walks the dbname/backupid/level/nodeid hierarchy and returns a set of
    ``(dbname, backupid, level, nodeid, fname)`` tuples.

    Directories that cannot be listed (e.g. removed concurrently or still
    incomplete) are silently skipped -- the sync is best-effort by design.
    """
    lst = set()
    for db in vol.dbNames():
        try:
            backupIdsList = vol.backupIds(db)
        except Exception:  # was bare 'except:' -- don't trap SystemExit/KeyboardInterrupt
            continue
        for bid in backupIdsList:
            try:
                nodeslist, level = vol.nodesList(db, bid)
            except Exception:
                continue
            for nid in nodeslist:
                try:
                    fileslist, level = vol.filesList(db, bid, nid, level = level)
                except Exception:
                    continue
                for fname in fileslist:
                    lst.add((db, bid, level, nid, fname))
    return lst

def syncBackups(source, destination, node_id, debug = False):
    """Mirror the source archive volume onto the destination volume.

    source, destination: URLs of form ``ftp://user:pw@host[:port]/volume``.
    node_id: if not None, restrict the sync to files of ``node_<node_id>``
             so each cluster node handles only its own slice in parallel.
    debug: print one line per copied or removed file.

    Files present in the source but missing on the destination are copied;
    files present only on the destination are removed (best-effort).
    Raises RuntimeError when either URL is not an ftp:// URL.
    """
    node = "node_%d" % node_id if node_id is not None else None
    src_url, dst_url = urlparse(source), urlparse(destination)
    if src_url.scheme != 'ftp' or dst_url.scheme != 'ftp':
        raise RuntimeError('Only FTP access protocol is supported')
    # Default port 2021 -- presumably the EXAoperation/confd FTP port; TODO confirm.
    src_port = src_url.port if src_url.port is not None else 2021
    dst_port = dst_url.port if dst_url.port is not None else 2021
    # The first path component of each URL is the archive volume name.
    src_vol = ArchiveVolume(src_url.hostname, int(src_port), src_url.username, src_url.password, src_url.path.split('/')[1])
    dst_vol = ArchiveVolume(dst_url.hostname, int(dst_port), dst_url.username, dst_url.password, dst_url.path.split('/')[1])
    src_lst, dst_lst = getBackupList(src_vol), getBackupList(dst_vol)
    # Copy files missing on the destination (fpath[3] is the nodeid).
    for fpath in sorted(src_lst):
        if fpath in dst_lst: continue
        if node is not None and fpath[3] != node: continue
        length = src_vol.copyFile(dst_vol, *fpath)
        if debug: print("cp", '/'.join(fpath), "->", length)
    # Remove files that no longer exist on the source (best-effort).
    for fpath in sorted(dst_lst):
        if fpath in src_lst: continue
        if node is not None and fpath[3] != node: continue
        if debug: print("rm", '/'.join(fpath))
        try:
            dst_vol.removeFile(*fpath)
        except Exception:  # was bare 'except:' -- don't trap SystemExit/KeyboardInterrupt
            if debug: print("rm", repr(fpath), "(-)")

def run(ctx):
    """UDF entry point; executed once per input row (one row per node via
    EXA_LOADAVG, so each node syncs its own backup slice).

    Captures all stdout/stderr of the sync into a buffer and emits each
    non-empty line as (iproc, line number, text).
    """
    # NOTE(review): 'sys' is used here -- make sure 'import sys' is present
    # in the script's import block.
    sys.stdout = sys.stderr = output = StringIO()
    remote_nodes_list = ctx.remote_nodes.split(',')
    nid = int(ctx.iproc)
    # Determine this node's private-network IP via SIOCGIFADDR (0x8915) on
    # interface 'private0' -- adjust the interface name to your cluster.
    sfd = socket(AF_INET, SOCK_DGRAM)
    currentip = inet_ntoa(ioctl(sfd.fileno(), 0x8915, pack('256s', b'private0'))[20:24])
    sfd.close()

    # Credentials and URL templates come from the connection objects;
    # the URLs contain %s placeholders for user, password and host.
    local_url = exa.get_connection('BACKUPSYNC_LOCAL_CONN').address
    local_user = exa.get_connection('BACKUPSYNC_LOCAL_CONN').user
    local_password = exa.get_connection('BACKUPSYNC_LOCAL_CONN').password

    remote_url = exa.get_connection('BACKUPSYNC_REMOTE_CONN').address
    remote_user = exa.get_connection('BACKUPSYNC_REMOTE_CONN').user
    remote_password = exa.get_connection('BACKUPSYNC_REMOTE_CONN').password

    # Each node talks to the remote node with the same index.
    syncBackups(local_url % (local_user, local_password, currentip),
                remote_url % (remote_user, remote_password, remote_nodes_list[nid]),
                int(nid), True)
    # Emit the captured debug output line by line.
    lnum = 0
    for line in output.getvalue().split('\n'):
        if len(line.strip()) == 0: continue
        ctx.emit(nid, lnum, line)
        lnum += 1
/

-- Run the sync on every node; the second argument is the comma-separated
-- list of remote node IP addresses (one entry per local node).
SELECT syncBackups(IPROC, '192.168.0.93') FROM EXA_LOADAVG;
60 changes: 42 additions & 18 deletions Environment-Management/synchronize-archive-volumes-via-ftp.md
Original file line number Diff line number Diff line change
@@ -1,48 +1,72 @@
# Synchronize Archive Volumes via FTP
# Synchronize Archive Volumes via FTP

###### Please note that this script is *not officially supported* by Exasol. We will try to help you as much as possible, but can't guarantee anything since this is not an official Exasol product.

## Background

With this UDF (backup_copy_ftp.sql) written in Python, you can easily synchronize archive volumes between clusters. Transport is TLS encrypted (self._ftp = FTP_TLS). After volumes have been initially synchronized, all files added or deleted will be added or deleted in the target archive volume. This UDF does not support synchronizing specific days or backup IDs, but it can be easily adjusted to your needs. Parallelism is handled by the database. So for best performance, the number of database and master nodes of the target archive volume should be the same.
With this UDF (backup_copy_ftp.sql) written in Python, you can easily synchronize archive volumes between clusters. Transport is TLS encrypted (self._ftp = FTP_TLS). After volumes have been initially synchronized, all files added or deleted will be added or deleted in the target archive volume. This UDF does not support synchronizing specific days or backup IDs, but it can be easily adjusted to your needs. Parallelism is handled by the database. So for best performance, the number of database and master nodes of the target archive volume should be the same.

![](images/UDF_sync_volumes.PNG)

## Prerequisites

* Your Remote Archive Volumes must be accessible to the Exasol cluster.
* The user creating the UDF must have permission to create the script in a schema

## How to synchronize archive volumes via FTP

In this section, you can replace the title of "How To ..." to something that fits the theme better. 
* Your Archive Volumes must be accessible to the Exasol cluster.
* The user creating the UDF must have permission to create the script in a schema.

## Step 1: Create the UDF

Open the attached file (backup_copy_ftp.sql) and create the script in the schema of your choice. Within the UDF, you should adjust these variables accordingly:
Open the attached file (backup_copy_ftp.sql), create the connection objects `BACKUPSYNC_LOCAL_CONN` and `BACKUPSYNC_REMOTE_CONN`, then create the script in the schema of your choice. Within the connection object definitions, you should adjust the values

* `SourceArchiveVolumeName`
* `SourceEXAoperationOrConfdUser`
* `SourceEXAoperationOrConfdPW`
* `TargetArchiveVolumeName`
* `TargetEXAoperationOrConfdUser`
* `TargetEXAoperationOrConfdPW`

accordingly:

```python
LOCAL_URL = 'ftp://ExaoperationUser:EXAoperationPW@%s/SourceArchiveVolumeID'
REMOTE_URL = 'ftp://EXAoperationUser:EXAoperationPW@%s/TargetArchiveVolumeID'
REMOTE_NODES = [ 'IP node 11', 'IP node 12', 'IP node 13']
```sql
CREATE CONNECTION BACKUPSYNC_LOCAL_CONN
TO 'ftp://%s:%s@%s/SourceArchiveVolumeName'
USER 'SourceEXAoperationOrConfdUser'
IDENTIFIED BY 'SourceEXAoperationOrConfdPW';

CREATE CONNECTION BACKUPSYNC_REMOTE_CONN
TO 'ftp://%s:%s@%s/TargetArchiveVolumeName'
USER 'TargetEXAoperationOrConfdUser'
IDENTIFIED BY 'TargetEXAoperationOrConfdPW';
```

Note: the calculation of `currentip` in the `run` method might require an adaptation of the name of the respective network interface. Please contact Exasol Support to help you find out this information.

## Step 2

Once the script is created, you can run it like this:


```sql
SELECT syncBackups(IPROC) FROM EXA_LOADAVG;
-- SELECT syncBackups(IPROC,'FIRST_REMOTE_IP,SECOND_REMOTE_IP') FROM EXA_LOADAVG;

SELECT syncBackups(IPROC,'192.168.0.1,192.168.0.2') FROM EXA_LOADAVG;
```
If needed, you can run this script regularly. 
If needed, you can run this script regularly.

## Additional Notes

If a synchronization attempt fails, you must clean up the target volume manually.

## Additional References

* [Create Remote Archive Volume](https://docs.exasol.com/administration/on-premise/manage_storage/create_remote_archive_volume.htm)
* [CREATE CONNECTION](https://docs.exasol.com/db/latest/sql/create_connection.htm)
* [Create Local Archive Volume, version 7.1](https://docs.exasol.com/db/7.1/administration/on-premise/manage_storage/create_local_archive_volume.htm)
* [Create Local Archive Volume, version 8](https://docs.exasol.com/db/latest/administration/on-premise/manage_storage/create_local_archive_volume.htm)
* [Download Local Backup, version 7.1](https://docs.exasol.com/db/7.1/administration/on-premise/backup_restore/download_local_backup.htm)
* [Download Local Backup, version 8](https://docs.exasol.com/db/latest/administration/on-premise/backup_restore/download_local_backup.htm)
* [Upload Local Backup, version 8](https://docs.exasol.com/db/latest/administration/on-premise/backup_restore/upload_local_backup.htm)

## Downloads
[backup_copy_ftp.zip](https://github.com/exasol/Public-Knowledgebase/files/9927383/backup_copy_ftp.zip)

*We appreciate your input! Share your knowledge by contributing to the Knowledge Base directly in [GitHub](https://github.com/exasol/public-knowledgebase).*
* [backup_copy_ftp.sql](https://github.com/exasol/public-knowledgebase/blob/main/Environment-Management/attachments/backup_copy_ftp.sql)

*We appreciate your input! Share your knowledge by contributing to the Knowledge Base directly in [GitHub](https://github.com/exasol/public-knowledgebase).*

0 comments on commit a4adfbb

Please sign in to comment.