diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml index 3ddb1a277f..c4183c2601 100644 --- a/.github/workflows/maven.yml +++ b/.github/workflows/maven.yml @@ -26,23 +26,33 @@ name: CI on: push: - branches: [ "master" ] pull_request: branches: [ "master" ] +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 + jobs: build-8: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - name: Cache for maven dependencies + uses: actions/cache@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ranger + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Set up JDK 8 uses: actions/setup-java@v4 with: java-version: '8' distribution: 'temurin' - cache: maven - name: build (8) - run: mvn -T 8 clean install --no-transfer-progress -B -V + run: mvn -T 8 clean verify --no-transfer-progress -B -V - name: Upload artifacts uses: actions/upload-artifact@v4 with: @@ -50,17 +60,27 @@ jobs: path: target/* build-11: + needs: + - build-8 runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - name: Cache for maven dependencies + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ranger + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- - name: Set up JDK 11 uses: actions/setup-java@v4 with: java-version: '11' distribution: 'temurin' - cache: maven - name: build (11) - run: mvn -T 8 clean install -pl '!knox-agent' --no-transfer-progress -B -V + run: mvn -T 8 clean verify -pl '!knox-agent' --no-transfer-progress -B -V - name: Upload artifacts uses: actions/upload-artifact@v4 with: @@ -70,7 +90,6 @@ jobs: docker-build: needs: - build-8 - - build-11 runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -85,7 +104,7 @@ jobs: cp version dev-support/ranger-docker/dist - name: Cache downloaded archives - uses: 
actions/cache@v3 + uses: actions/cache@v4 with: path: dev-support/ranger-docker/downloads key: ${{ runner.os }}-ranger-downloads-${{ hashFiles('dev-support/ranger-docker/.env') }} diff --git a/agents-audit/pom.xml b/agents-audit/pom.xml index e9c2d4b38e..ad74b07f7c 100644 --- a/agents-audit/pom.xml +++ b/agents-audit/pom.xml @@ -28,6 +28,8 @@ Audit Component Auth Audit + true + false UTF-8 1.2 diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AmazonCloudWatchAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AmazonCloudWatchAuditDestination.java index f3ba77a3ec..de1ef40075 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AmazonCloudWatchAuditDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AmazonCloudWatchAuditDestination.java @@ -19,11 +19,6 @@ package org.apache.ranger.audit.destination; -import java.util.Collection; -import java.util.Comparator; -import java.util.Properties; -import java.util.stream.Collectors; - import com.amazonaws.services.logs.AWSLogs; import com.amazonaws.services.logs.AWSLogsClientBuilder; import com.amazonaws.services.logs.model.CreateLogStreamRequest; @@ -32,7 +27,6 @@ import com.amazonaws.services.logs.model.PutLogEventsRequest; import com.amazonaws.services.logs.model.PutLogEventsResult; import com.amazonaws.services.logs.model.ResourceNotFoundException; - import org.apache.commons.lang.StringUtils; import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.provider.MiscUtil; @@ -41,6 +35,11 @@ import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.Comparator; +import java.util.Properties; +import java.util.stream.Collectors; + /** * Writes audit events to Amazon CloudWatch Logs. *

@@ -48,52 +47,72 @@ *

* Thread-safety is ensured by making the log method synchronized. * This is to avoid possible race condition on {@link #sequenceToken} which is required in PutLogEvents API. + * * @see PutLogEvents API Reference *

* Note: Amazon CloudWatch has limits on the payload size and request rate. * Based on the traffic, adjust the batch size and flush interval accordingly. *

- * * @see Amazon CloudWatch Logs Service Limits */ @ThreadSafe public class AmazonCloudWatchAuditDestination extends AuditDestination { - private static final Logger LOG = LoggerFactory.getLogger(AmazonCloudWatchAuditDestination.class); - public static final String PROP_LOG_GROUP_NAME = "log_group"; + public static final String PROP_LOG_GROUP_NAME = "log_group"; public static final String PROP_LOG_STREAM_PREFIX = "log_stream_prefix"; - public static final String CONFIG_PREFIX = "ranger.audit.amazon_cloudwatch"; - public static final String PROP_REGION = "region"; + public static final String CONFIG_PREFIX = "ranger.audit.amazon_cloudwatch"; + public static final String PROP_REGION = "region"; - private String logGroupName; - private String logStreamName; - private AWSLogs logsClient; - private String sequenceToken; - private String regionName; + private String logGroupName; + private String logStreamName; + private volatile AWSLogs logsClient; + private String sequenceToken; + private String regionName; + + static Collection toInputLogEvent(Collection collection) { + return collection.stream() + .map(e -> new InputLogEvent() + .withMessage(MiscUtil.stringify(e)) + .withTimestamp(e.getEventTime().getTime())) + .sorted(Comparator.comparingLong(InputLogEvent::getTimestamp)) + .collect(Collectors.toList()); + } @Override public void init(Properties props, String propPrefix) { LOG.info("init() called for CloudWatchAuditDestination"); + super.init(props, propPrefix); - this.logGroupName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_LOG_GROUP_NAME, "ranger_audits"); + this.logGroupName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_LOG_GROUP_NAME, "ranger_audits"); this.logStreamName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_LOG_STREAM_PREFIX) + MiscUtil.generateUniqueId(); - this.regionName = MiscUtil.getStringProperty(props, propPrefix + "." 
+ PROP_REGION); + this.regionName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_REGION); logsClient = getClient(); // Initialize client + createLogStream(); } @Override public void stop() { super.stop(); + logStatus(); } + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#flush() + */ + @Override + public void flush() { + } + @Override public synchronized boolean log(Collection collection) { - boolean ret = false; + boolean ret = false; AWSLogs client = getClient(); PutLogEventsRequest req = new PutLogEventsRequest() @@ -107,92 +126,88 @@ public synchronized boolean log(Collection collection) { try { sequenceToken = pushLogEvents(req, false, client); + addSuccessCount(collection.size()); + ret = true; } catch (Throwable e) { addFailedCount(collection.size()); + LOG.error("Failed to send audit events", e); } return ret; } - private String pushLogEvents(PutLogEventsRequest req, - boolean retryingOnInvalidSeqToken, - AWSLogs client) { + private String pushLogEvents(PutLogEventsRequest req, boolean retryingOnInvalidSeqToken, AWSLogs client) { String sequenceToken; + try { PutLogEventsResult re = client.putLogEvents(req); + sequenceToken = re.getNextSequenceToken(); } catch (ResourceNotFoundException ex) { if (!retryingOnInvalidSeqToken) { createLogStream(); + return pushLogEvents(req, true, client); } + throw ex; } catch (InvalidSequenceTokenException ex) { if (retryingOnInvalidSeqToken) { LOG.error("Unexpected invalid sequence token. Possible race condition occurred"); + throw ex; } // LogStream may exist before first push attempt, re-obtain the sequence token - if (LOG.isDebugEnabled()) { - LOG.debug("Invalid sequence token. Plugin possibly restarted. Updating the sequence token and retrying"); - } + LOG.debug("Invalid sequence token. Plugin possibly restarted. 
Updating the sequence token and retrying"); + sequenceToken = ex.getExpectedSequenceToken(); + req.setSequenceToken(sequenceToken); + return pushLogEvents(req, true, client); } return sequenceToken; } - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#flush() - */ - @Override - public void flush() { - - } - - static Collection toInputLogEvent(Collection collection) { - return collection.stream() - .map(e -> new InputLogEvent() - .withMessage(MiscUtil.stringify(e)) - .withTimestamp(e.getEventTime().getTime())) - .sorted(Comparator.comparingLong(InputLogEvent::getTimestamp)) - .collect(Collectors.toList()); - } - private void createLogStream() { AWSLogs client = getClient(); + CreateLogStreamRequest req = new CreateLogStreamRequest() .withLogGroupName(logGroupName) .withLogStreamName(logStreamName); - LOG.info(String.format("Creating Log Stream `%s` in Log Group `%s`", logStreamName, logGroupName)); + LOG.info("Creating Log Stream `{}` in Log Group `{}`", logStreamName, logGroupName); + client.createLogStream(req); } private AWSLogs getClient() { - if (logsClient == null) { + AWSLogs ret = logsClient; + + if (ret == null) { synchronized (AmazonCloudWatchAuditDestination.class) { - if (logsClient == null) { - logsClient = newClient(); + ret = logsClient; + + if (ret == null) { + ret = newClient(); + logsClient = ret; } } } - return logsClient; + return ret; } private AWSLogs newClient() { if (StringUtils.isBlank(regionName)) { return AWSLogsClientBuilder.standard().build(); } + return AWSLogsClientBuilder.standard().withRegion(regionName).build(); } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java index c221487c24..3c7654cbdb 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java +++ 
b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java @@ -19,63 +19,57 @@ package org.apache.ranger.audit.destination; -import java.util.Properties; - import org.apache.ranger.audit.provider.BaseAuditHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Properties; + /** * This class needs to be extended by anyone who wants to build custom * destination */ public abstract class AuditDestination extends BaseAuditHandler { - private static final Logger logger = LoggerFactory.getLogger(AuditDestination.class); - - public AuditDestination() { - logger.info("AuditDestination() enter"); - } + private static final Logger logger = LoggerFactory.getLogger(AuditDestination.class); - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties, - * java.lang.String) - */ - @Override - public void init(Properties prop, String basePropertyName) { - super.init(prop, basePropertyName); - } + public AuditDestination() { + logger.info("AuditDestination() enter"); + } - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#flush() - */ - @Override - public void flush() { + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties, + * java.lang.String) + */ + @Override + public void init(Properties prop, String basePropertyName) { + super.init(prop, basePropertyName); + } - } + @Override + public void start() { + } - @Override - public void start() { - - } + @Override + public void stop() { + } - @Override - public void stop() { - - } + @Override + public void waitToComplete() { + } - @Override - public void waitToComplete() { - - } + @Override + public void waitToComplete(long timeout) { + } - @Override - public void waitToComplete(long timeout) { - - } - + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#flush() + */ + @Override + public void flush() { + } } 
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java index 8324d998b2..5c536ec4ef 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java @@ -19,21 +19,6 @@ package org.apache.ranger.audit.destination; -import java.io.File; -import java.security.PrivilegedActionException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; - import org.apache.commons.lang.StringUtils; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.http.HttpHost; @@ -63,197 +48,274 @@ import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosTicket; +import java.io.File; +import java.security.PrivilegedActionException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + public class ElasticSearchAuditDestination extends AuditDestination { private static final Logger LOG = LoggerFactory.getLogger(ElasticSearchAuditDestination.class); - public static final String CONFIG_URLS = "urls"; - public static final String 
CONFIG_PORT = "port"; - public static final String CONFIG_USER = "user"; - public static final String CONFIG_PWRD = "password"; + public static final String CONFIG_URLS = "urls"; + public static final String CONFIG_PORT = "port"; + public static final String CONFIG_USER = "user"; + public static final String CONFIG_PWRD = "password"; public static final String CONFIG_PROTOCOL = "protocol"; - public static final String CONFIG_INDEX = "index"; - public static final String CONFIG_PREFIX = "ranger.audit.elasticsearch"; - public static final String DEFAULT_INDEX = "ranger_audits"; - - private String index = CONFIG_INDEX; - private final AtomicReference clientRef = new AtomicReference<>(null); - private String protocol; - private String user; - private int port; - private String password; - private String hosts; + public static final String CONFIG_INDEX = "index"; + public static final String CONFIG_PREFIX = "ranger.audit.elasticsearch"; + public static final String DEFAULT_INDEX = "ranger_audits"; + + private final AtomicReference clientRef = new AtomicReference<>(null); + private final AtomicLong lastLoggedAt = new AtomicLong(0); + + private String index = CONFIG_INDEX; + private String protocol; + private String user; + private int port; + private String password; + private String hosts; private Subject subject; public ElasticSearchAuditDestination() { propPrefix = CONFIG_PREFIX; } + public static RestClientBuilder getRestClientBuilder(String urls, String protocol, String user, String password, int port) { + RestClientBuilder restClientBuilder = RestClient.builder(MiscUtil.toArray(urls, ",").stream().map(x -> new HttpHost(x, port, protocol)).toArray(HttpHost[]::new)); + ThreadFactory clientThreadFactory = new ThreadFactoryBuilder().setNameFormat("ElasticSearch rest client %s").setDaemon(true).build(); + + if (StringUtils.isNotBlank(user) && StringUtils.isNotBlank(password) && !user.equalsIgnoreCase("NONE") && !password.equalsIgnoreCase("NONE")) { + if 
(password.contains("keytab") && new File(password).exists()) { + final KerberosCredentialsProvider credentialsProvider = CredentialsProviderUtil.getKerberosCredentials(user, password); + final Lookup authSchemeRegistry = RegistryBuilder.create().register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory()).build(); + + restClientBuilder.setHttpClientConfigCallback(clientBuilder -> { + clientBuilder.setThreadFactory(clientThreadFactory); + clientBuilder.setDefaultCredentialsProvider(credentialsProvider); + clientBuilder.setDefaultAuthSchemeRegistry(authSchemeRegistry); + + return clientBuilder; + }); + } else { + final CredentialsProvider credentialsProvider = CredentialsProviderUtil.getBasicCredentials(user, password); + + restClientBuilder.setHttpClientConfigCallback(clientBuilder -> { + clientBuilder.setThreadFactory(clientThreadFactory); + clientBuilder.setDefaultCredentialsProvider(credentialsProvider); + + return clientBuilder; + }); + } + } else { + LOG.error("ElasticSearch Credentials not provided!!"); + + final CredentialsProvider credentialsProvider = null; + + restClientBuilder.setHttpClientConfigCallback(clientBuilder -> { + clientBuilder.setThreadFactory(clientThreadFactory); + clientBuilder.setDefaultCredentialsProvider(credentialsProvider); + + return clientBuilder; + }); + } + + return restClientBuilder; + } @Override public void init(Properties props, String propPrefix) { super.init(props, propPrefix); + this.protocol = getStringProperty(props, propPrefix + "." + CONFIG_PROTOCOL, "http"); - this.user = getStringProperty(props, propPrefix + "." + CONFIG_USER, ""); + this.user = getStringProperty(props, propPrefix + "." + CONFIG_USER, ""); this.password = getStringProperty(props, propPrefix + "." + CONFIG_PWRD, ""); - this.port = MiscUtil.getIntProperty(props, propPrefix + "." + CONFIG_PORT, 9200); - this.index = getStringProperty(props, propPrefix + "." 
+ CONFIG_INDEX, DEFAULT_INDEX); - this.hosts = getHosts(); - LOG.info("Connecting to ElasticSearch: " + connectionString()); - getClient(); // Initialize client - } + this.port = MiscUtil.getIntProperty(props, propPrefix + "." + CONFIG_PORT, 9200); + this.index = getStringProperty(props, propPrefix + "." + CONFIG_INDEX, DEFAULT_INDEX); + this.hosts = getHosts(); - private String connectionString() { - return String.format(Locale.ROOT, "User:%s, %s://%s:%s/%s", user, protocol, hosts, port, index); + LOG.info("Connecting to ElasticSearch: {}", connectionString()); + + getClient(); // Initialize client } @Override public void stop() { super.stop(); + logStatus(); } + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#flush() + */ + @Override + public void flush() { + // Empty flush method + } + @Override public boolean log(Collection events) { boolean ret = false; + try { logStatusIfRequired(); addTotalCount(events.size()); RestHighLevelClient client = getClient(); + if (null == client) { // ElasticSearch is still not initialized. 
So need return error addDeferredCount(events.size()); + return ret; } - ArrayList eventList = new ArrayList<>(events); - BulkRequest bulkRequest = new BulkRequest(); + ArrayList eventList = new ArrayList<>(events); + BulkRequest bulkRequest = new BulkRequest(); + try { eventList.forEach(event -> { - AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; - String id = authzEvent.getEventId(); - Map doc = toDoc(authzEvent); + AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; + String id = authzEvent.getEventId(); + Map doc = toDoc(authzEvent); + bulkRequest.add(new IndexRequest(index).id(id).source(doc)); }); } catch (Exception ex) { addFailedCount(eventList.size()); logFailedEvent(eventList, ex); } + BulkResponse response = client.bulk(bulkRequest, RequestOptions.DEFAULT); if (response.status().getStatus() >= 400) { addFailedCount(eventList.size()); logFailedEvent(eventList, "HTTP " + response.status().getStatus()); } else { BulkItemResponse[] items = response.getItems(); + for (int i = 0; i < items.length; i++) { - AuditEventBase itemRequest = eventList.get(i); + AuditEventBase itemRequest = eventList.get(i); BulkItemResponse itemResponse = items[i]; + if (itemResponse.isFailed()) { addFailedCount(1); - logFailedEvent(Arrays.asList(itemRequest), itemResponse.getFailureMessage()); + logFailedEvent(Collections.singletonList(itemRequest), itemResponse.getFailureMessage()); } else { if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Indexed %s", itemRequest.getEventKey())); + LOG.debug("Indexed {}", itemRequest.getEventKey()); } + addSuccessCount(1); + ret = true; } } } } catch (Throwable t) { addDeferredCount(events.size()); + logError("Error sending message to ElasticSearch", t); } return ret; } - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#flush() - */ - @Override - public void flush() { - // Empty flush method - } - public boolean isAsync() { return true; } synchronized RestHighLevelClient getClient() { 
RestHighLevelClient client = clientRef.get(); + if (client == null) { synchronized (ElasticSearchAuditDestination.class) { client = clientRef.get(); + if (client == null) { client = newClient(); + clientRef.set(client); } } } + if (subject != null) { KerberosTicket ticket = CredentialsProviderUtil.getTGT(subject); + try { - if (new Date().getTime() > ticket.getEndTime().getTime()) { - clientRef.set(null); - CredentialsProviderUtil.ticketExpireTime80 = 0; - client = newClient(); - clientRef.set(client); - } else if (CredentialsProviderUtil.ticketWillExpire(ticket)) { - subject = CredentialsProviderUtil.login(user, password); + if (ticket != null) { + if (new Date().getTime() > ticket.getEndTime().getTime()) { + clientRef.set(null); + + CredentialsProviderUtil.ticketExpireTime80 = 0; + + client = newClient(); + + clientRef.set(client); + } else if (CredentialsProviderUtil.ticketWillExpire(ticket)) { + subject = CredentialsProviderUtil.login(user, password); + } + } else { + LOG.error("failed to get KerberosTicket for subject {}", subject); } } catch (PrivilegedActionException e) { LOG.error("PrivilegedActionException:", e); + throw new RuntimeException(e); } } + return client; } - private final AtomicLong lastLoggedAt = new AtomicLong(0); + Map toDoc(AuthzAuditEvent auditEvent) { + Map doc = new HashMap<>(); - public static RestClientBuilder getRestClientBuilder(String urls, String protocol, String user, String password, int port) { - RestClientBuilder restClientBuilder = RestClient.builder( - MiscUtil.toArray(urls, ",").stream() - .map(x -> new HttpHost(x, port, protocol)) - .toArray(HttpHost[]::new) - ); - ThreadFactory clientThreadFactory = new ThreadFactoryBuilder() - .setNameFormat("ElasticSearch rest client %s") - .setDaemon(true) - .build(); - if (StringUtils.isNotBlank(user) && StringUtils.isNotBlank(password) && !user.equalsIgnoreCase("NONE") && !password.equalsIgnoreCase("NONE")) { - if (password.contains("keytab") && new File(password).exists()) { - final 
KerberosCredentialsProvider credentialsProvider = - CredentialsProviderUtil.getKerberosCredentials(user, password); - Lookup authSchemeRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory()).build(); - restClientBuilder.setHttpClientConfigCallback(clientBuilder -> { - clientBuilder.setThreadFactory(clientThreadFactory); - clientBuilder.setDefaultCredentialsProvider(credentialsProvider); - clientBuilder.setDefaultAuthSchemeRegistry(authSchemeRegistry); - return clientBuilder; - }); - } else { - final CredentialsProvider credentialsProvider = - CredentialsProviderUtil.getBasicCredentials(user, password); - restClientBuilder.setHttpClientConfigCallback(clientBuilder -> { - clientBuilder.setThreadFactory(clientThreadFactory); - clientBuilder.setDefaultCredentialsProvider(credentialsProvider); - return clientBuilder; - }); - } - } else { - LOG.error("ElasticSearch Credentials not provided!!"); - final CredentialsProvider credentialsProvider = null; - restClientBuilder.setHttpClientConfigCallback(clientBuilder -> { - clientBuilder.setThreadFactory(clientThreadFactory); - clientBuilder.setDefaultCredentialsProvider(credentialsProvider); - return clientBuilder; - }); - } - return restClientBuilder; + doc.put("id", auditEvent.getEventId()); + doc.put("access", auditEvent.getAccessType()); + doc.put("enforcer", auditEvent.getAclEnforcer()); + doc.put("agent", auditEvent.getAgentId()); + doc.put("repo", auditEvent.getRepositoryName()); + doc.put("sess", auditEvent.getSessionId()); + doc.put("reqUser", auditEvent.getUser()); + doc.put("reqData", auditEvent.getRequestData()); + doc.put("resource", auditEvent.getResourcePath()); + doc.put("cliIP", auditEvent.getClientIP()); + doc.put("logType", auditEvent.getLogType()); + doc.put("result", auditEvent.getAccessResult()); + doc.put("policy", auditEvent.getPolicyId()); + doc.put("repoType", auditEvent.getRepositoryType()); + doc.put("resType", auditEvent.getResourceType()); + doc.put("reason", 
auditEvent.getResultReason()); + doc.put("action", auditEvent.getAction()); + doc.put("evtTime", auditEvent.getEventTime()); + doc.put("seq_num", auditEvent.getSeqNum()); + doc.put("event_count", auditEvent.getEventCount()); + doc.put("event_dur_ms", auditEvent.getEventDurationMS()); + doc.put("tags", auditEvent.getTags()); + doc.put("datasets", auditEvent.getDatasets()); + doc.put("projects", auditEvent.getProjects()); + doc.put("cluster", auditEvent.getClusterName()); + doc.put("zoneName", auditEvent.getZoneName()); + doc.put("agentHost", auditEvent.getAgentHostname()); + doc.put("policyVersion", auditEvent.getPolicyVersion()); + + return doc; + } + + private String connectionString() { + return String.format(Locale.ROOT, "User:%s, %s://%s:%s/%s", user, protocol, hosts, port, index); } private RestHighLevelClient newClient() { @@ -261,33 +323,36 @@ private RestHighLevelClient newClient() { if (StringUtils.isNotBlank(user) && StringUtils.isNotBlank(password) && password.contains("keytab") && new File(password).exists()) { subject = CredentialsProviderUtil.login(user, password); } - RestClientBuilder restClientBuilder = - getRestClientBuilder(hosts, protocol, user, password, port); + + RestClientBuilder restClientBuilder = getRestClientBuilder(hosts, protocol, user, password, port); + try (RestHighLevelClient restHighLevelClient = new RestHighLevelClient(restClientBuilder)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Initialized client"); - } + LOG.debug("Initialized client"); + boolean exists = false; + try { exists = restHighLevelClient.indices().open(new OpenIndexRequest(this.index), RequestOptions.DEFAULT).isShardsAcknowledged(); } catch (Exception e) { - LOG.warn("Error validating index " + this.index); + LOG.warn("Error validating index {}", this.index); } + if (exists) { - if (LOG.isDebugEnabled()) { - LOG.debug("Index exists"); - } + LOG.debug("Index exists"); } else { LOG.info("Index does not exist"); } + return restHighLevelClient; } } catch (Throwable 
t) { lastLoggedAt.updateAndGet(lastLoggedAt -> { - long now = System.currentTimeMillis(); + long now = System.currentTimeMillis(); long elapsed = now - lastLoggedAt; + if (elapsed > TimeUnit.MINUTES.toMillis(1)) { - LOG.error("Can't connect to ElasticSearch server: " + connectionString(), t); + LOG.error("Can't connect to ElasticSearch server: {}", connectionString(), t); + return now; } else { return lastLoggedAt; @@ -299,54 +364,25 @@ private RestHighLevelClient newClient() { private String getHosts() { String urls = MiscUtil.getStringProperty(props, propPrefix + "." + CONFIG_URLS); + if (urls != null) { urls = urls.trim(); } + if ("NONE".equalsIgnoreCase(urls)) { urls = null; } + return urls; } private String getStringProperty(Properties props, String propName, String defaultValue) { String value = MiscUtil.getStringProperty(props, propName); + if (null == value) { return defaultValue; } - return value; - } - Map toDoc(AuthzAuditEvent auditEvent) { - Map doc = new HashMap<>(); - doc.put("id", auditEvent.getEventId()); - doc.put("access", auditEvent.getAccessType()); - doc.put("enforcer", auditEvent.getAclEnforcer()); - doc.put("agent", auditEvent.getAgentId()); - doc.put("repo", auditEvent.getRepositoryName()); - doc.put("sess", auditEvent.getSessionId()); - doc.put("reqUser", auditEvent.getUser()); - doc.put("reqData", auditEvent.getRequestData()); - doc.put("resource", auditEvent.getResourcePath()); - doc.put("cliIP", auditEvent.getClientIP()); - doc.put("logType", auditEvent.getLogType()); - doc.put("result", auditEvent.getAccessResult()); - doc.put("policy", auditEvent.getPolicyId()); - doc.put("repoType", auditEvent.getRepositoryType()); - doc.put("resType", auditEvent.getResourceType()); - doc.put("reason", auditEvent.getResultReason()); - doc.put("action", auditEvent.getAction()); - doc.put("evtTime", auditEvent.getEventTime()); - doc.put("seq_num", auditEvent.getSeqNum()); - doc.put("event_count", auditEvent.getEventCount()); - doc.put("event_dur_ms", 
auditEvent.getEventDurationMS()); - doc.put("tags", auditEvent.getTags()); - doc.put("datasets", auditEvent.getDatasets()); - doc.put("projects", auditEvent.getProjects()); - doc.put("cluster", auditEvent.getClusterName()); - doc.put("zoneName", auditEvent.getZoneName()); - doc.put("agentHost", auditEvent.getAgentHostname()); - doc.put("policyVersion", auditEvent.getPolicyVersion()); - return doc; + return value; } - } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java index 2bab08ac2f..bd87dc1943 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java @@ -19,6 +19,11 @@ package org.apache.ranger.audit.destination; +import org.apache.ranger.audit.model.AuditEventBase; +import org.apache.ranger.audit.provider.MiscUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; @@ -29,216 +34,232 @@ import java.util.List; import java.util.Properties; -import org.apache.ranger.audit.model.AuditEventBase; -import org.apache.ranger.audit.provider.MiscUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class write the logs to local file */ public class FileAuditDestination extends AuditDestination { - private static final Logger logger = LoggerFactory - .getLogger(FileAuditDestination.class); - - public static final String PROP_FILE_LOCAL_DIR = "dir"; - public static final String PROP_FILE_LOCAL_FILE_NAME_FORMAT = "filename.format"; - public static final String PROP_FILE_FILE_ROLLOVER = "file.rollover.sec"; - - String baseFolder = null; - String fileFormat = null; - int fileRolloverSec = 24 * 60 * 60; // In seconds - private String logFileNameFormat; - - boolean initDone = false; - - 
private File logFolder; - PrintWriter logWriter = null; - - private Date fileCreateTime = null; - - private String currentFileName; - - private boolean isStopped = false; - - @Override - public void init(Properties prop, String propPrefix) { - super.init(prop, propPrefix); - - // Initialize properties for this class - // Initial folder and file properties - String logFolderProp = MiscUtil.getStringProperty(props, propPrefix - + "." + PROP_FILE_LOCAL_DIR); - logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_FILE_LOCAL_FILE_NAME_FORMAT); - fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_FILE_FILE_ROLLOVER, fileRolloverSec); - - if (logFolderProp == null || logFolderProp.isEmpty()) { - logger.error("File destination folder is not configured. Please set {}. {}. name= {}", propPrefix, PROP_FILE_LOCAL_DIR, getName()); - return; - } - logFolder = new File(logFolderProp); - if (!logFolder.isDirectory()) { - logFolder.mkdirs(); - if (!logFolder.isDirectory()) { - logger.error("FileDestination folder not found and can't be created. folder={}, name={}", logFolder.getAbsolutePath(), getName()); - return; - } - } - logger.info("logFolder={}, name={}", logFolder, getName()); - - if (logFileNameFormat == null || logFileNameFormat.isEmpty()) { - logFileNameFormat = "%app-type%_ranger_audit.log"; - } - - logger.info("logFileNameFormat={}, destName={}", logFileNameFormat, getName()); - - initDone = true; - } - - @Override - synchronized public boolean logJSON(Collection events) { - logStatusIfRequired(); - addTotalCount(events.size()); - - if (isStopped) { - logError("logJSON() called after stop was requested. 
name={}", getName()); - addDeferredCount(events.size()); - return false; - } - - try { - PrintWriter out = getLogFileStream(); - for (String event : events) { - out.println(event); - } - out.flush(); - } catch (Throwable t) { - addDeferredCount(events.size()); - logError("Error writing to log file.", t); - return false; - } - addSuccessCount(events.size()); - return true; - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection) - */ - @Override - public boolean log(Collection events) { - if (isStopped) { - addTotalCount(events.size()); - addDeferredCount(events.size()); - logError("log() called after stop was requested. name={}", getName()); - return false; - } - List jsonList = new ArrayList(); - for (AuditEventBase event : events) { - try { - jsonList.add(MiscUtil.stringify(event)); - } catch (Throwable t) { - addTotalCount(1); - addFailedCount(1); - logFailedEvent(event); - logger.error("Error converting to JSON. event={}", event); - } - } - return logJSON(jsonList); - - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#start() - */ - @Override - public void start() { - // Nothing to do here. We will open the file when the first log request - // comes - } - - @Override - synchronized public void stop() { - isStopped = true; - if (logWriter != null) { - try { - logWriter.flush(); - logWriter.close(); - } catch (Throwable t) { - logger.error("Error on closing log writer. Exception will be ignored. 
name= {}, fileName= {}", getName(), currentFileName); - } - logWriter = null; - } - logStatus(); - } - - // Helper methods in this class - synchronized private PrintWriter getLogFileStream() throws Exception { - closeFileIfNeeded(); - - // Either there are no open log file or the previous one has been rolled - // over - if (logWriter == null) { - Date currentTime = new Date(); - // Create a new file - String fileName = MiscUtil.replaceTokens(logFileNameFormat, - currentTime.getTime()); - File outLogFile = new File(logFolder, fileName); - if (outLogFile.exists()) { - // Let's try to get the next available file - int i = 0; - while (true) { - i++; - int lastDot = fileName.lastIndexOf('.'); - String baseName = fileName.substring(0, lastDot); - String extension = fileName.substring(lastDot); - String newFileName = baseName + "." + i + extension; - File newLogFile = new File(logFolder, newFileName); - if (!newLogFile.exists()) { - // Move the file - if (!outLogFile.renameTo(newLogFile)) { - logger.error("Error renameing file. {} to {} " , outLogFile, newLogFile); - } - break; - } - } - } - if (!outLogFile.exists()) { - logger.info("Creating new file. destName={} , fileName={} ", getName(), fileName); - // Open the file - logWriter = new PrintWriter(new BufferedWriter(new FileWriter( - outLogFile))); - } else { - logWriter = new PrintWriter(new BufferedWriter(new FileWriter( - outLogFile, true))); - } - fileCreateTime = new Date(); - currentFileName = outLogFile.getPath(); - } - return logWriter; - } - - private void closeFileIfNeeded() { - if (logWriter == null) { - return; - } - if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) { - logger.info("Closing file. Rolling over. name={} , fileName={}", getName(), currentFileName); - try { - logWriter.flush(); - logWriter.close(); - } catch (Throwable t) { - logger.error("Error on closing log writter. Exception will be ignored. 
name={} , fileName={}", getName(), currentFileName); - } - logWriter = null; - currentFileName = null; - } - } + private static final Logger logger = LoggerFactory.getLogger(FileAuditDestination.class); + + public static final String PROP_FILE_LOCAL_DIR = "dir"; + public static final String PROP_FILE_LOCAL_FILE_NAME_FORMAT = "filename.format"; + public static final String PROP_FILE_FILE_ROLLOVER = "file.rollover.sec"; + + int fileRolloverSec = 24 * 60 * 60; // In seconds + boolean initDone; + PrintWriter logWriter; + + private String logFileNameFormat; + private File logFolder; + private Date fileCreateTime; + private String currentFileName; + private boolean isStopped; + + @Override + public void init(Properties prop, String propPrefix) { + super.init(prop, propPrefix); + + // Initialize properties for this class + // Initial folder and file properties + String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_LOCAL_DIR); + + logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_LOCAL_FILE_NAME_FORMAT); + fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_FILE_ROLLOVER, fileRolloverSec); + + if (logFolderProp == null || logFolderProp.isEmpty()) { + logger.error("File destination folder is not configured. Please set {}. {}. name= {}", propPrefix, PROP_FILE_LOCAL_DIR, getName()); + + return; + } + + logFolder = new File(logFolderProp); + + if (!logFolder.isDirectory()) { + logFolder.mkdirs(); + + if (!logFolder.isDirectory()) { + logger.error("FileDestination folder not found and can't be created. 
folder={}, name={}", logFolder.getAbsolutePath(), getName()); + + return; + } + } + + logger.info("logFolder={}, name={}", logFolder, getName()); + + if (logFileNameFormat == null || logFileNameFormat.isEmpty()) { + logFileNameFormat = "%app-type%_ranger_audit.log"; + } + + logger.info("logFileNameFormat={}, destName={}", logFileNameFormat, getName()); + + initDone = true; + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#start() + */ + @Override + public void start() { + // Nothing to do here. We will open the file when the first log request + // comes + } + + @Override + public synchronized void stop() { + isStopped = true; + + if (logWriter != null) { + try { + logWriter.flush(); + logWriter.close(); + } catch (Throwable t) { + logger.error("Error on closing log writer. Exception will be ignored. name={}, fileName={}", getName(), currentFileName); + } + + logWriter = null; + } + + logStatus(); + } + + @Override + public synchronized boolean logJSON(Collection events) { + logStatusIfRequired(); + addTotalCount(events.size()); + + if (isStopped) { + logError("logJSON() called after stop was requested. name={}", getName()); + + addDeferredCount(events.size()); + + return false; + } + + try { + PrintWriter out = getLogFileStream(); + + for (String event : events) { + out.println(event); + } + + out.flush(); + } catch (Throwable t) { + addDeferredCount(events.size()); + + logError("Error writing to log file.", t); + + return false; + } + + addSuccessCount(events.size()); + + return true; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection) + */ + @Override + public boolean log(Collection events) { + if (isStopped) { + addTotalCount(events.size()); + addDeferredCount(events.size()); + + logError("log() called after stop was requested. 
name={}", getName()); + + return false; + } + + List jsonList = new ArrayList<>(); + + for (AuditEventBase event : events) { + try { + jsonList.add(MiscUtil.stringify(event)); + } catch (Throwable t) { + addTotalCount(1); + addFailedCount(1); + logFailedEvent(event); + + logger.error("Error converting to JSON. event={}", event); + } + } + + return logJSON(jsonList); + } + + // Helper methods in this class + private synchronized PrintWriter getLogFileStream() throws Exception { + closeFileIfNeeded(); + + // Either there are no open log file or the previous one has been rolled + // over + if (logWriter == null) { + // Create a new file + Date currentTime = new Date(); + String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime()); + File outLogFile = new File(logFolder, fileName); + + if (outLogFile.exists()) { + // Let's try to get the next available file + int i = 0; + + while (true) { + i++; + + int lastDot = fileName.lastIndexOf('.'); + String baseName = fileName.substring(0, lastDot); + String extension = fileName.substring(lastDot); + String newFileName = baseName + "." + i + extension; + File newLogFile = new File(logFolder, newFileName); + + if (!newLogFile.exists()) { + // Move the file + if (!outLogFile.renameTo(newLogFile)) { + logger.error("Error renameing file. {} to {} ", outLogFile, newLogFile); + } + + break; + } + } + } + + if (!outLogFile.exists()) { + logger.info("Creating new file. 
destName={} , fileName={} ", getName(), fileName); + + // Open the file + logWriter = new PrintWriter(new BufferedWriter(new FileWriter(outLogFile))); + } else { + logWriter = new PrintWriter(new BufferedWriter(new FileWriter(outLogFile, true))); + } + + fileCreateTime = new Date(); + currentFileName = outLogFile.getPath(); + } + + return logWriter; + } + + private void closeFileIfNeeded() { + if (logWriter == null) { + return; + } + + if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000L) { + logger.info("Closing file. Rolling over. name={} , fileName={}", getName(), currentFileName); + + try { + logWriter.flush(); + logWriter.close(); + } catch (Throwable t) { + logger.error("Error on closing log writter. Exception will be ignored. name={} , fileName={}", getName(), currentFileName); + } + logWriter = null; + currentFileName = null; + } + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java index 4ad8dfd985..262eb50f44 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java @@ -19,6 +19,13 @@ package org.apache.ranger.audit.destination; +import org.apache.ranger.audit.model.AuditEventBase; +import org.apache.ranger.audit.provider.AuditWriterFactory; +import org.apache.ranger.audit.provider.MiscUtil; +import org.apache.ranger.audit.utils.RangerAuditWriter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.File; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; @@ -27,174 +34,190 @@ import java.util.Map; import java.util.Properties; -import org.apache.ranger.audit.model.AuditEventBase; -import org.apache.ranger.audit.provider.AuditWriterFactory; -import org.apache.ranger.audit.provider.MiscUtil; -import 
org.apache.ranger.audit.utils.RangerAuditWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class write the logs to local file */ public class HDFSAuditDestination extends AuditDestination { - private static final Logger logger = LoggerFactory - .getLogger(HDFSAuditDestination.class); - - private Map auditConfigs = null; - private String auditProviderName = null; - private RangerAuditWriter auditWriter = null; - private boolean initDone = false; - private boolean isStopped = false; - - @Override - public void init(Properties prop, String propPrefix) { - super.init(prop, propPrefix); - this.auditProviderName = getName(); - this.auditConfigs = configProps; - - try { - this.auditWriter = getWriter(); - this.initDone = true; - } catch (Exception e) { - logger.error("Error while getting Audit writer", e); - } - } - - @Override - synchronized public boolean logJSON(final Collection events) { - logStatusIfRequired(); - addTotalCount(events.size()); - - if (!initDone) { - addDeferredCount(events.size()); - return false; - } - if (isStopped) { - addDeferredCount(events.size()); - logError("log() called after stop was requested. name={}", getName()); - return false; - } - try { - boolean ret = auditWriter.log(events); - if (!ret) { - addDeferredCount(events.size()); - return false; - } - } catch (Throwable t) { - addDeferredCount(events.size()); - logError("Error writing to log file.", t); - return false; - } finally { - if (logger.isDebugEnabled()) { - logger.debug("Flushing HDFS audit. Event Size:{}", events.size()); - } - if (auditWriter != null) { - flush(); - } - } - addSuccessCount(events.size()); - return true; - } - - @Override - synchronized public boolean logFile(final File file) { - logStatusIfRequired(); - if (!initDone) { - return false; - } - if (isStopped) { - logError("log() called after stop was requested. 
name={}", getName()); - return false; - } - - try { - boolean ret = auditWriter.logFile(file); - if (!ret) { - return false; - } - } catch (Throwable t) { - logError("Error writing to log file.", t); - return false; - } finally { - logger.info("Flushing HDFS audit. File:{}{}", file.getAbsolutePath(), file.getName()); - if (auditWriter != null) { - flush(); - } - } - return true; - } - - @Override - public void flush() { - if (logger.isDebugEnabled()) { - logger.debug("==> HDFSAuditDestination.flush() called. name={}", getName()); - } - try { - MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { - auditWriter.flush(); - return null; - }); - } catch (Exception excp) { - logger.error("HDFSAuditDestination.flush() failed", excp); - } - - if (logger.isDebugEnabled()) { - logger.debug("<== HDFSAuditDestination.flush() called. name={}", getName()); - } - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection) - */ - @Override - public boolean log(Collection events) { - if (isStopped) { - logStatusIfRequired(); - addTotalCount(events.size()); - addDeferredCount(events.size()); - logError("log() called after stop was requested. name={}", getName()); - return false; - } - List jsonList = new ArrayList(); - for (AuditEventBase event : events) { - try { - jsonList.add(MiscUtil.stringify(event)); - } catch (Throwable t) { - logger.error("Error converting to JSON. event={}", event); - addTotalCount(1); - addFailedCount(1); - logFailedEvent(event); - } - } - return logJSON(jsonList); - - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#start() - */ - @Override - public void start() { - // Nothing to do here. 
We will open the file when the first log request - // comes - } - - @Override - synchronized public void stop() { - auditWriter.stop(); - logStatus(); - isStopped = true; - } - - public RangerAuditWriter getWriter() throws Exception { - AuditWriterFactory auditWriterFactory = AuditWriterFactory.getInstance(); - auditWriterFactory.init(props, propPrefix, auditProviderName, auditConfigs); - return auditWriterFactory.getAuditWriter(); - } + private static final Logger logger = LoggerFactory.getLogger(HDFSAuditDestination.class); + + private Map auditConfigs; + private String auditProviderName; + private RangerAuditWriter auditWriter; + private boolean initDone; + private boolean isStopped; + + @Override + public void init(Properties prop, String propPrefix) { + super.init(prop, propPrefix); + + this.auditProviderName = getName(); + this.auditConfigs = configProps; + + try { + this.auditWriter = getWriter(); + this.initDone = true; + } catch (Exception e) { + logger.error("Error while getting Audit writer", e); + } + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#start() + */ + @Override + public void start() { + // Nothing to do here. We will open the file when the first log request + // comes + } + + @Override + public synchronized void stop() { + auditWriter.stop(); + + logStatus(); + + isStopped = true; + } + + @Override + public void flush() { + logger.debug("==> HDFSAuditDestination.flush() called. name={}", getName()); + + try { + MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { + auditWriter.flush(); + return null; + }); + } catch (Exception excp) { + logger.error("HDFSAuditDestination.flush() failed", excp); + } + + logger.debug("<== HDFSAuditDestination.flush() called. 
name={}", getName()); + } + + @Override + public synchronized boolean logJSON(final Collection events) { + logStatusIfRequired(); + addTotalCount(events.size()); + + if (!initDone) { + addDeferredCount(events.size()); + + return false; + } + + if (isStopped) { + addDeferredCount(events.size()); + + logError("log() called after stop was requested. name={}", getName()); + + return false; + } + + try { + boolean ret = auditWriter.log(events); + + if (!ret) { + addDeferredCount(events.size()); + + return false; + } + } catch (Throwable t) { + addDeferredCount(events.size()); + + logError("Error writing to log file.", t); + + return false; + } finally { + logger.debug("Flushing HDFS audit. Event Size:{}", events.size()); + + if (auditWriter != null) { + flush(); + } + } + + addSuccessCount(events.size()); + + return true; + } + + @Override + public synchronized boolean logFile(final File file) { + logStatusIfRequired(); + + if (!initDone) { + return false; + } + + if (isStopped) { + logError("log() called after stop was requested. name={}", getName()); + + return false; + } + + try { + boolean ret = auditWriter.logFile(file); + + if (!ret) { + return false; + } + } catch (Throwable t) { + logError("Error writing to log file.", t); + + return false; + } finally { + logger.info("Flushing HDFS audit. File:{}{}", file.getAbsolutePath(), file.getName()); + + if (auditWriter != null) { + flush(); + } + } + + return true; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection) + */ + @Override + public boolean log(Collection events) { + if (isStopped) { + logStatusIfRequired(); + addTotalCount(events.size()); + addDeferredCount(events.size()); + + logError("log() called after stop was requested. 
name={}", getName()); + + return false; + } + + List jsonList = new ArrayList<>(); + + for (AuditEventBase event : events) { + try { + jsonList.add(MiscUtil.stringify(event)); + } catch (Throwable t) { + logger.error("Error converting to JSON. event={}", event); + + addTotalCount(1); + addFailedCount(1); + logFailedEvent(event); + } + } + + return logJSON(jsonList); + } + + public RangerAuditWriter getWriter() throws Exception { + AuditWriterFactory auditWriterFactory = AuditWriterFactory.getInstance(); + + auditWriterFactory.init(props, propPrefix, auditProviderName, auditConfigs); + + return auditWriterFactory.getAuditWriter(); + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java index 6da3e75875..8e1c20a8c9 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java @@ -19,109 +19,115 @@ package org.apache.ranger.audit.destination; -import java.util.Collection; -import java.util.Properties; - import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.provider.MiscUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collection; +import java.util.Properties; + public class Log4JAuditDestination extends AuditDestination { - private static final Logger logger = LoggerFactory - .getLogger(Log4JAuditDestination.class); - - private static Logger auditLogger = null; - - public static final String PROP_LOG4J_LOGGER = "logger"; - public static final String DEFAULT_LOGGER_PREFIX = "ranger.audit"; - private String loggerName = null; - - public Log4JAuditDestination() { - logger.info("Log4JAuditDestination() called."); - - } - - @Override - public void init(Properties prop, String propPrefix) { - super.init(prop, propPrefix); - loggerName = 
MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_LOG4J_LOGGER); - if (loggerName == null || loggerName.isEmpty()) { - loggerName = DEFAULT_LOGGER_PREFIX + "." + getName(); - logger.info("Logger property " + propPrefix + "." - + PROP_LOG4J_LOGGER + " was not set. Constructing default=" - + loggerName); - } - logger.info("Logger name for " + getName() + " is " + loggerName); - auditLogger = LoggerFactory.getLogger(loggerName); - logger.info("Done initializing logger for audit. name=" + getName() - + ", loggerName=" + loggerName); - } - - - @Override - public void stop() { - super.stop(); - logStatus(); - } - - @Override - public boolean log(AuditEventBase event) { - if (!auditLogger.isInfoEnabled()) { - logStatusIfRequired(); - addTotalCount(1); - return true; - } - - if (event != null) { - String eventStr = MiscUtil.stringify(event); - logJSON(eventStr); - } - return true; - } - - @Override - public boolean log(Collection events) { - if (!auditLogger.isInfoEnabled()) { - logStatusIfRequired(); - addTotalCount(events.size()); - return true; - } - - for (AuditEventBase event : events) { - log(event); - } - return true; - } - - @Override - public boolean logJSON(String event) { - logStatusIfRequired(); - addTotalCount(1); - if (!auditLogger.isInfoEnabled()) { - return true; - } - - if (event != null) { - auditLogger.info(event); - addSuccessCount(1); - } - return true; - } - - @Override - public boolean logJSON(Collection events) { - if (!auditLogger.isInfoEnabled()) { - logStatusIfRequired(); - addTotalCount(events.size()); - return true; - } - - for (String event : events) { - logJSON(event); - } - return false; - } + private static final Logger logger = LoggerFactory.getLogger(Log4JAuditDestination.class); + private static Logger auditLogger; + + public static final String PROP_LOG4J_LOGGER = "logger"; + public static final String DEFAULT_LOGGER_PREFIX = "ranger.audit"; + + public Log4JAuditDestination() { + logger.info("Log4JAuditDestination() called."); 
+ } + + @Override + public void init(Properties prop, String propPrefix) { + super.init(prop, propPrefix); + + String loggerName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_LOG4J_LOGGER); + + if (loggerName == null || loggerName.isEmpty()) { + loggerName = DEFAULT_LOGGER_PREFIX + "." + getName(); + + logger.info("Logger property {}.{} was not set. Constructing default={}", propPrefix, PROP_LOG4J_LOGGER, loggerName); + } + + logger.info("Logger name for {} is {}", getName(), loggerName); + + auditLogger = LoggerFactory.getLogger(loggerName); + + logger.info("Done initializing logger for audit. name={}, loggerName={}", getName(), loggerName); + } + + @Override + public void stop() { + super.stop(); + + logStatus(); + } + + @Override + public boolean log(AuditEventBase event) { + if (!auditLogger.isInfoEnabled()) { + logStatusIfRequired(); + addTotalCount(1); + + return true; + } + + if (event != null) { + String eventStr = MiscUtil.stringify(event); + + logJSON(eventStr); + } + + return true; + } + + @Override + public boolean logJSON(String event) { + logStatusIfRequired(); + addTotalCount(1); + + if (!auditLogger.isInfoEnabled()) { + return true; + } + + if (event != null) { + auditLogger.info(event); + addSuccessCount(1); + } + + return true; + } + + @Override + public boolean logJSON(Collection events) { + if (!auditLogger.isInfoEnabled()) { + logStatusIfRequired(); + addTotalCount(events.size()); + + return true; + } + + for (String event : events) { + logJSON(event); + } + + return false; + } + + @Override + public boolean log(Collection events) { + if (!auditLogger.isInfoEnabled()) { + logStatusIfRequired(); + addTotalCount(events.size()); + + return true; + } + + for (AuditEventBase event : events) { + log(event); + } + return true; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java index 
017e15b2e8..4be6002e36 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java @@ -25,26 +25,32 @@ import org.apache.ranger.audit.provider.MiscUtil; import org.apache.ranger.audit.utils.InMemoryJAASConfiguration; import org.apache.ranger.audit.utils.KerberosAction; -import org.apache.ranger.audit.utils.KerberosUser; import org.apache.ranger.audit.utils.KerberosJAASConfigUser; +import org.apache.ranger.audit.utils.KerberosUser; import org.apache.solr.client.solrj.SolrClient; import org.apache.solr.client.solrj.impl.CloudSolrClient; import org.apache.solr.client.solrj.impl.HttpClientUtil; import org.apache.solr.client.solrj.impl.Krb5HttpClientBuilder; -import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder; import org.apache.solr.client.solrj.impl.LBHttpSolrClient; +import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder; import org.apache.solr.client.solrj.response.UpdateResponse; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.security.auth.login.LoginException; + import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.lang.reflect.Field; import java.security.KeyManagementException; import java.security.KeyStore; import java.security.KeyStoreException; @@ -54,443 +60,466 @@ import java.security.UnrecoverableKeyException; import java.security.cert.CertificateException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.List; +import 
java.util.Optional; import java.util.Properties; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; -import javax.security.auth.login.LoginException; +public class SolrAuditDestination extends AuditDestination { + private static final Logger LOG = LoggerFactory.getLogger(SolrAuditDestination.class); -import java.util.Arrays; -import java.util.Optional; + public static final String PROP_SOLR_URLS = "urls"; + public static final String PROP_SOLR_ZK = "zookeepers"; + public static final String PROP_SOLR_COLLECTION = "collection"; + public static final String PROP_SOLR_FORCE_USE_INMEMORY_JAAS_CONFIG = "force.use.inmemory.jaas.config"; + public static final String DEFAULT_COLLECTION_NAME = "ranger_audits"; + public static final String PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG = "java.security.auth.login.config"; + private volatile SolrClient solrClient; + private volatile KerberosUser kerberosUser; -public class SolrAuditDestination extends AuditDestination { - private static final Logger LOG = LoggerFactory - .getLogger(SolrAuditDestination.class); - - public static final String PROP_SOLR_URLS = "urls"; - public static final String PROP_SOLR_ZK = "zookeepers"; - public static final String PROP_SOLR_COLLECTION = "collection"; - public static final String PROP_SOLR_FORCE_USE_INMEMORY_JAAS_CONFIG = "force.use.inmemory.jaas.config"; - - public static final String DEFAULT_COLLECTION_NAME = "ranger_audits"; - public static final String PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG = "java.security.auth.login.config"; - - private volatile SolrClient solrClient = null; - private volatile KerberosUser kerberosUser = null; - - public SolrAuditDestination() { - } - - @Override - public void init(Properties props, String propPrefix) { - LOG.info("init() called"); - super.init(props, propPrefix); - init(); - connect(); - } - - @Override - public void stop() { - 
LOG.info("SolrAuditDestination.stop() called.."); - logStatus(); - - if (solrClient != null) { - try { - solrClient.close(); - } catch (IOException ioe) { - LOG.error("Error while stopping slor!", ioe); - } finally { - solrClient = null; - } - } - - if (kerberosUser != null) { - try { - kerberosUser.logout(); - } catch (LoginException excp) { - LOG.error("Error logging out keytab user", excp); - } finally { - kerberosUser = null; - } - } - } - - synchronized void connect() { - SolrClient me = solrClient; - if (me == null) { - synchronized(SolrAuditDestination.class) { - me = solrClient; - if (solrClient == null) { - KeyManager[] kmList = getKeyManagers(); - TrustManager[] tmList = getTrustManagers(); - SSLContext sslContext = getSSLContext(kmList, tmList); - if(sslContext != null) { - SSLContext.setDefault(sslContext); - } - String urls = MiscUtil.getStringProperty(props, propPrefix - + "." + PROP_SOLR_URLS); - if (urls != null) { - urls = urls.trim(); - } - if (urls != null && urls.equalsIgnoreCase("NONE")) { - urls = null; - } - List solrURLs = new ArrayList(); - String zkHosts = null; - solrURLs = MiscUtil.toArray(urls, ","); - zkHosts = MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_SOLR_ZK); - if (zkHosts != null && zkHosts.equalsIgnoreCase("NONE")) { - zkHosts = null; - } - String collectionName = MiscUtil.getStringProperty(props, - propPrefix + "." 
+ PROP_SOLR_COLLECTION); - if (collectionName == null - || collectionName.equalsIgnoreCase("none")) { - collectionName = DEFAULT_COLLECTION_NAME; - } - - LOG.info("Solr zkHosts=" + zkHosts + ", solrURLs=" + urls - + ", collectionName=" + collectionName); - - if (zkHosts != null && !zkHosts.isEmpty()) { - LOG.info("Connecting to solr cloud using zkHosts=" - + zkHosts); - try (Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder()) { - SolrHttpClientBuilder kb = krbBuild.getBuilder(); - HttpClientUtil.setHttpClientBuilder(kb); - - final List zkhosts = new ArrayList(Arrays.asList(zkHosts.split(","))); - final CloudSolrClient solrCloudClient = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction() { - @Override - public CloudSolrClient run() throws Exception { - CloudSolrClient solrCloudClient = new CloudSolrClient.Builder(zkhosts, Optional.empty()).build(); - return solrCloudClient; - } - - ; - }); - - solrCloudClient.setDefaultCollection(collectionName); - me = solrClient = solrCloudClient; - } catch (Throwable t) { - LOG.error("Can't connect to Solr server. 
ZooKeepers=" - + zkHosts, t); - } - } else if (solrURLs != null && !solrURLs.isEmpty()) { - try (Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder()) { - LOG.info("Connecting to Solr using URLs=" + solrURLs); - SolrHttpClientBuilder kb = krbBuild.getBuilder(); - HttpClientUtil.setHttpClientBuilder(kb); - final List solrUrls = solrURLs; - final LBHttpSolrClient lbSolrClient = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction() { - @Override - public LBHttpSolrClient run() throws Exception { - LBHttpSolrClient.Builder builder = new LBHttpSolrClient.Builder(); - builder.withBaseSolrUrl(solrUrls.get(0)); - builder.withConnectionTimeout(1000); - LBHttpSolrClient lbSolrClient = builder.build(); - return lbSolrClient; - } - - ; - }); - - for (int i = 1; i < solrURLs.size(); i++) { - lbSolrClient.addSolrServer(solrURLs.get(i)); - } - me = solrClient = lbSolrClient; - } catch (Throwable t) { - LOG.error("Can't connect to Solr server. URL=" - + solrURLs, t); - } - } - } - } - } - } - - @Override - public boolean log(Collection events) { - boolean ret = false; - try { - logStatusIfRequired(); - addTotalCount(events.size()); - - if (solrClient == null) { - connect(); - if (solrClient == null) { - // Solr is still not initialized. 
So need return error - addDeferredCount(events.size()); - return ret; - } - } - - final Collection docs = new ArrayList(); - for (AuditEventBase event : events) { - AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; - // Convert AuditEventBase to Solr document - SolrInputDocument document = toSolrDoc(authzEvent); - docs.add(document); - } - try { - final UpdateResponse response = addDocsToSolr(solrClient, docs); - - if (response.getStatus() != 0) { - addFailedCount(events.size()); - logFailedEvent(events, response.toString()); - } else { - addSuccessCount(events.size()); - ret = true; - } - } catch (SolrException ex) { - addFailedCount(events.size()); - logFailedEvent(events, ex); - } - } catch (Throwable t) { - addDeferredCount(events.size()); - logError("Error sending message to Solr", t); - } - return ret; - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#flush() - */ - @Override - public void flush() { - - } - - SolrInputDocument toSolrDoc(AuthzAuditEvent auditEvent) { - SolrInputDocument doc = new SolrInputDocument(); - doc.addField("id", auditEvent.getEventId()); - doc.addField("access", auditEvent.getAccessType()); - doc.addField("enforcer", auditEvent.getAclEnforcer()); - doc.addField("agent", auditEvent.getAgentId()); - doc.addField("repo", auditEvent.getRepositoryName()); - doc.addField("sess", auditEvent.getSessionId()); - doc.addField("reqUser", auditEvent.getUser()); - doc.addField("reqData", auditEvent.getRequestData()); - doc.addField("resource", auditEvent.getResourcePath()); - doc.addField("cliIP", auditEvent.getClientIP()); - doc.addField("logType", auditEvent.getLogType()); - doc.addField("result", auditEvent.getAccessResult()); - doc.addField("policy", auditEvent.getPolicyId()); - doc.addField("repoType", auditEvent.getRepositoryType()); - doc.addField("resType", auditEvent.getResourceType()); - doc.addField("reason", auditEvent.getResultReason()); - doc.addField("action", auditEvent.getAction()); - 
doc.addField("evtTime", auditEvent.getEventTime()); - doc.addField("seq_num", auditEvent.getSeqNum()); - doc.setField("event_count", auditEvent.getEventCount()); - doc.setField("event_dur_ms", auditEvent.getEventDurationMS()); - doc.setField("tags", auditEvent.getTags()); - doc.addField("datasets", auditEvent.getDatasets()); - doc.addField("projects", auditEvent.getProjects()); - doc.setField("cluster", auditEvent.getClusterName()); - doc.setField("zoneName", auditEvent.getZoneName()); - doc.setField("agentHost", auditEvent.getAgentHostname()); - doc.setField("policyVersion", auditEvent.getPolicyVersion()); - - return doc; - } - - public boolean isAsync() { - return true; - } - - private void init() { - LOG.info("==>SolrAuditDestination.init()" ); - try { - // SolrJ requires "java.security.auth.login.config" property to be set to identify itself that it is kerberized. So using a dummy property for it - // Acutal solrclient JAAS configs are read from the ranger--audit.xml present in components conf folder and set by InMemoryJAASConfiguration - // Refer InMemoryJAASConfiguration doc for JAAS Configuration - String confFileName = System.getProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG); - LOG.info("In solrAuditDestination.init() : JAAS Configuration set as [" + confFileName + "]"); - if ( System.getProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG) == null ) { - if ( MiscUtil.getBooleanProperty(props, propPrefix + "." + PROP_SOLR_FORCE_USE_INMEMORY_JAAS_CONFIG,false) ) { - System.setProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG, "/dev/null"); - } else { - LOG.warn("No Client JAAS config present in solr audit config. 
Ranger Audit to Kerberized Solr will fail..."); - } - } - - LOG.info("Loading SolrClient JAAS config from Ranger audit config if present..."); - - InMemoryJAASConfiguration conf = InMemoryJAASConfiguration.init(props); - - KerberosUser kerberosUser = new KerberosJAASConfigUser("Client", conf); - - if (kerberosUser.getPrincipal() != null) { - this.kerberosUser = kerberosUser; - } - } catch (Exception e) { - LOG.error("ERROR: Unable to load SolrClient JAAS config from Audit config file. Audit to Kerberized Solr will fail...", e); - } finally { - String confFileName = System.getProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG); - LOG.info("In solrAuditDestination.init() (finally) : JAAS Configuration set as [" + confFileName + "]"); - } - LOG.info("<==SolrAuditDestination.init()" ); - } - - private KeyManager[] getKeyManagers() { - KeyManager[] kmList = null; - String credentialProviderPath = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL); - String keyStoreAlias = RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL_ALIAS; - String keyStoreFile = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_CLIENT_KEY_FILE); - String keyStoreFilepwd = MiscUtil.getCredentialString(credentialProviderPath, keyStoreAlias); - if (StringUtils.isNotEmpty(keyStoreFile) && StringUtils.isNotEmpty(keyStoreFilepwd)) { - InputStream in = null; - - try { - in = getFileInputStream(keyStoreFile); - - if (in != null) { - String keyStoreType = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE); - keyStoreType = StringUtils.isNotEmpty(keyStoreType) ? 
keyStoreType : RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE_DEFAULT; - KeyStore keyStore = KeyStore.getInstance(keyStoreType); - - keyStore.load(in, keyStoreFilepwd.toCharArray()); - - KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(RANGER_SSL_KEYMANAGER_ALGO_TYPE); - - keyManagerFactory.init(keyStore, keyStoreFilepwd.toCharArray()); - - kmList = keyManagerFactory.getKeyManagers(); - } else { - LOG.error("Unable to obtain keystore from file [" + keyStoreFile + "]"); - } - } catch (KeyStoreException e) { - LOG.error("Unable to obtain from KeyStore :" + e.getMessage(), e); - } catch (NoSuchAlgorithmException e) { - LOG.error("SSL algorithm is NOT available in the environment", e); - } catch (CertificateException e) { - LOG.error("Unable to obtain the requested certification ", e); - } catch (FileNotFoundException e) { - LOG.error("Unable to find the necessary SSL Keystore Files", e); - } catch (IOException e) { - LOG.error("Unable to read the necessary SSL Keystore Files", e); - } catch (UnrecoverableKeyException e) { - LOG.error("Unable to recover the key from keystore", e); - } finally { - close(in, keyStoreFile); - } - } - - return kmList; - } - - private TrustManager[] getTrustManagers() { - TrustManager[] tmList = null; - String credentialProviderPath = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL); - String trustStoreAlias = RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL_ALIAS; - String trustStoreFile = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_TRUSTSTORE_FILE); - String trustStoreFilepwd = MiscUtil.getCredentialString(credentialProviderPath, trustStoreAlias); - if (StringUtils.isNotEmpty(trustStoreFile) && StringUtils.isNotEmpty(trustStoreFilepwd)) { - InputStream in = null; - - try { - in = getFileInputStream(trustStoreFile); - - if (in != null) { - String trustStoreType = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE); - trustStoreType = StringUtils.isNotEmpty(trustStoreType) ? 
trustStoreType : RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE_DEFAULT; - KeyStore trustStore = KeyStore.getInstance(trustStoreType); - - trustStore.load(in, trustStoreFilepwd.toCharArray()); - - TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(RANGER_SSL_TRUSTMANAGER_ALGO_TYPE); - - trustManagerFactory.init(trustStore); - - tmList = trustManagerFactory.getTrustManagers(); - } else { - LOG.error("Unable to obtain truststore from file [" + trustStoreFile + "]"); - } - } catch (KeyStoreException e) { - LOG.error("Unable to obtain from KeyStore", e); - } catch (NoSuchAlgorithmException e) { - LOG.error("SSL algorithm is NOT available in the environment :" + e.getMessage(), e); - } catch (CertificateException e) { - LOG.error("Unable to obtain the requested certification :" + e.getMessage(), e); - } catch (FileNotFoundException e) { - LOG.error("Unable to find the necessary SSL TrustStore File:" + trustStoreFile, e); - } catch (IOException e) { - LOG.error("Unable to read the necessary SSL TrustStore Files :" + trustStoreFile, e); - } finally { - close(in, trustStoreFile); - } - } - - return tmList; - } - - private SSLContext getSSLContext(KeyManager[] kmList, TrustManager[] tmList) { - SSLContext sslContext = null; - try { - sslContext = SSLContext.getInstance(RANGER_SSL_CONTEXT_ALGO_TYPE); - if (sslContext != null) { - sslContext.init(kmList, tmList, new SecureRandom()); - } - } catch (NoSuchAlgorithmException e) { - LOG.error("SSL algorithm is not available in the environment", e); - } catch (KeyManagementException e) { - LOG.error("Unable to initialise the SSLContext", e); - } - return sslContext; - } - - private UpdateResponse addDocsToSolr(final SolrClient solrClient, final Collection docs) throws Exception { - final UpdateResponse ret; - - try { - final PrivilegedExceptionAction action = () -> solrClient.add(docs); - - if (kerberosUser != null) { - // execute the privileged action as the given keytab user - final KerberosAction kerberosAction = 
new KerberosAction<>(kerberosUser, action, LOG); - - ret = (UpdateResponse) kerberosAction.execute(); - } else { - ret = action.run(); - } - } catch (Exception e) { - throw e; - } - - return ret; - } - - private InputStream getFileInputStream(String fileName) throws IOException { - InputStream in = null; - if (StringUtils.isNotEmpty(fileName)) { - File file = new File(fileName); - if (file != null && file.exists()) { - in = new FileInputStream(file); - } else { - in = ClassLoader.getSystemResourceAsStream(fileName); - } - } - return in; - } - - private void close(InputStream str, String filename) { - if (str != null) { - try { - str.close(); - } catch (IOException excp) { - LOG.error("Error while closing file: [" + filename + "]", excp); - } - } - } + public SolrAuditDestination() { + } + + @Override + public void init(Properties props, String propPrefix) { + LOG.info("init() called"); + + super.init(props, propPrefix); + + init(); + connect(); + } + + @Override + public void stop() { + LOG.info("SolrAuditDestination.stop() called.."); + + logStatus(); + + SolrClient solrClient = this.solrClient; + + if (solrClient != null) { + try { + solrClient.close(); + } catch (IOException ioe) { + LOG.error("Error while stopping solr!", ioe); + } finally { + this.solrClient = null; + } + } + + KerberosUser kerberosUser = this.kerberosUser; + + if (kerberosUser != null) { + try { + kerberosUser.logout(); + } catch (LoginException excp) { + LOG.error("Error logging out keytab user", excp); + } finally { + this.kerberosUser = null; + } + } + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#flush() + */ + @Override + public void flush() { + } + + @Override + public boolean log(Collection events) { + boolean ret = false; + + try { + logStatusIfRequired(); + addTotalCount(events.size()); + + SolrClient solrClient = this.solrClient; + + if (solrClient == null) { + connect(); + + solrClient = this.solrClient; + + if (solrClient == null) { + // 
Solr is still not initialized. So need return error + addDeferredCount(events.size()); + + return ret; + } + } + + final Collection docs = new ArrayList<>(); + + for (AuditEventBase event : events) { + AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; + + // Convert AuditEventBase to Solr document + SolrInputDocument document = toSolrDoc(authzEvent); + + docs.add(document); + } + + try { + final UpdateResponse response = addDocsToSolr(solrClient, docs); + + if (response.getStatus() != 0) { + addFailedCount(events.size()); + + logFailedEvent(events, response.toString()); + } else { + addSuccessCount(events.size()); + + ret = true; + } + } catch (SolrException ex) { + addFailedCount(events.size()); + logFailedEvent(events, ex); + } + } catch (Throwable t) { + addDeferredCount(events.size()); + + logError("Error sending message to Solr", t); + } + + return ret; + } + + public boolean isAsync() { + return true; + } + + synchronized void connect() { + SolrClient me = solrClient; + + if (me == null) { + synchronized (SolrAuditDestination.class) { + me = solrClient; + + if (me == null) { + KeyManager[] kmList = getKeyManagers(); + TrustManager[] tmList = getTrustManagers(); + SSLContext sslContext = getSSLContext(kmList, tmList); + + if (sslContext != null) { + SSLContext.setDefault(sslContext); + } + + String urls = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_SOLR_URLS); + + if (urls != null) { + urls = urls.trim(); + } + + if (urls != null && urls.equalsIgnoreCase("NONE")) { + urls = null; + } + + List solrURLs = MiscUtil.toArray(urls, ","); + String zkHosts = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_SOLR_ZK); + + if (zkHosts != null && zkHosts.equalsIgnoreCase("NONE")) { + zkHosts = null; + } + + String collectionName = MiscUtil.getStringProperty(props, propPrefix + "." 
+ PROP_SOLR_COLLECTION); + + if (collectionName == null || collectionName.equalsIgnoreCase("none")) { + collectionName = DEFAULT_COLLECTION_NAME; + } + + LOG.info("Solr zkHosts={}, solrURLs={}, collectionName={}", zkHosts, urls, collectionName); + + if (zkHosts != null && !zkHosts.isEmpty()) { + LOG.info("Connecting to solr cloud using zkHosts={}", zkHosts); + + try (Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder()) { + SolrHttpClientBuilder kb = krbBuild.getBuilder(); + + HttpClientUtil.setHttpClientBuilder(kb); + + final List zkhosts = new ArrayList<>(Arrays.asList(zkHosts.split(","))); + final CloudSolrClient solrCloudClient = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> new CloudSolrClient.Builder(zkhosts, Optional.empty()).build()); + + solrCloudClient.setDefaultCollection(collectionName); + + me = solrCloudClient; + solrClient = me; + } catch (Throwable t) { + LOG.error("Can't connect to Solr server. ZooKeepers={}", zkHosts, t); + } + } else if (solrURLs != null && !solrURLs.isEmpty()) { + try (Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder()) { + LOG.info("Connecting to Solr using URLs={}", solrURLs); + + SolrHttpClientBuilder kb = krbBuild.getBuilder(); + + HttpClientUtil.setHttpClientBuilder(kb); + + final List solrUrls = solrURLs; + final LBHttpSolrClient lbSolrClient = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { + LBHttpSolrClient.Builder builder = new LBHttpSolrClient.Builder(); + + builder.withBaseSolrUrl(solrUrls.get(0)); + builder.withConnectionTimeout(1000); + + return builder.build(); + }); + + for (int i = 1; i < solrURLs.size(); i++) { + lbSolrClient.addSolrServer(solrURLs.get(i)); + } + + me = lbSolrClient; + solrClient = me; + } catch (Throwable t) { + LOG.error("Can't connect to Solr server. 
URL={}", solrURLs, t); + } + } + } + } + } + } + + SolrInputDocument toSolrDoc(AuthzAuditEvent auditEvent) { + SolrInputDocument doc = new SolrInputDocument(); + + doc.addField("id", auditEvent.getEventId()); + doc.addField("access", auditEvent.getAccessType()); + doc.addField("enforcer", auditEvent.getAclEnforcer()); + doc.addField("agent", auditEvent.getAgentId()); + doc.addField("repo", auditEvent.getRepositoryName()); + doc.addField("sess", auditEvent.getSessionId()); + doc.addField("reqUser", auditEvent.getUser()); + doc.addField("reqData", auditEvent.getRequestData()); + doc.addField("resource", auditEvent.getResourcePath()); + doc.addField("cliIP", auditEvent.getClientIP()); + doc.addField("logType", auditEvent.getLogType()); + doc.addField("result", auditEvent.getAccessResult()); + doc.addField("policy", auditEvent.getPolicyId()); + doc.addField("repoType", auditEvent.getRepositoryType()); + doc.addField("resType", auditEvent.getResourceType()); + doc.addField("reason", auditEvent.getResultReason()); + doc.addField("action", auditEvent.getAction()); + doc.addField("evtTime", auditEvent.getEventTime()); + doc.addField("seq_num", auditEvent.getSeqNum()); + doc.setField("event_count", auditEvent.getEventCount()); + doc.setField("event_dur_ms", auditEvent.getEventDurationMS()); + doc.setField("tags", auditEvent.getTags()); + doc.addField("datasets", auditEvent.getDatasets()); + doc.addField("projects", auditEvent.getProjects()); + doc.setField("cluster", auditEvent.getClusterName()); + doc.setField("zoneName", auditEvent.getZoneName()); + doc.setField("agentHost", auditEvent.getAgentHostname()); + doc.setField("policyVersion", auditEvent.getPolicyVersion()); + + return doc; + } + + private void init() { + LOG.info("==>SolrAuditDestination.init()"); + + try { + // SolrJ requires "java.security.auth.login.config" property to be set to identify itself that it is kerberized. 
So using a dummy property for it + // Actual solrclient JAAS configs are read from the ranger--audit.xml present in components conf folder and set by InMemoryJAASConfiguration + // Refer InMemoryJAASConfiguration doc for JAAS Configuration + String confFileName = System.getProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG); + + LOG.info("In solrAuditDestination.init() : JAAS Configuration set as [{}]", confFileName); + + if (System.getProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG) == null) { + if (MiscUtil.getBooleanProperty(props, propPrefix + "." + PROP_SOLR_FORCE_USE_INMEMORY_JAAS_CONFIG, false)) { + System.setProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG, "/dev/null"); + } else { + LOG.warn("No Client JAAS config present in solr audit config. Ranger Audit to Kerberized Solr will fail..."); + } + } + + LOG.info("Loading SolrClient JAAS config from Ranger audit config if present..."); + + InMemoryJAASConfiguration conf = InMemoryJAASConfiguration.init(props); + + KerberosUser kerberosUser = new KerberosJAASConfigUser("Client", conf); + + if (kerberosUser.getPrincipal() != null) { + this.kerberosUser = kerberosUser; + } + } catch (Exception e) { + LOG.error("ERROR: Unable to load SolrClient JAAS config from Audit config file. 
Audit to Kerberized Solr will fail...", e); + } finally { + String confFileName = System.getProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG); + LOG.info("In solrAuditDestination.init() (finally) : JAAS Configuration set as [{}]", confFileName); + } + + LOG.info("<==SolrAuditDestination.init()"); + } + + private KeyManager[] getKeyManagers() { + KeyManager[] kmList = null; + String credentialProviderPath = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL); + String keyStoreAlias = RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL_ALIAS; + String keyStoreFile = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_CLIENT_KEY_FILE); + String keyStoreFilepwd = MiscUtil.getCredentialString(credentialProviderPath, keyStoreAlias); + + if (StringUtils.isNotEmpty(keyStoreFile) && StringUtils.isNotEmpty(keyStoreFilepwd)) { + InputStream in = null; + + try { + in = getFileInputStream(keyStoreFile); + + if (in != null) { + String keyStoreType = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE); + + keyStoreType = StringUtils.isNotEmpty(keyStoreType) ? 
keyStoreType : RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE_DEFAULT; + + KeyStore keyStore = KeyStore.getInstance(keyStoreType); + + keyStore.load(in, keyStoreFilepwd.toCharArray()); + + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(RANGER_SSL_KEYMANAGER_ALGO_TYPE); + + keyManagerFactory.init(keyStore, keyStoreFilepwd.toCharArray()); + + kmList = keyManagerFactory.getKeyManagers(); + } else { + LOG.error("Unable to obtain keystore from file [{}]", keyStoreFile); + } + } catch (KeyStoreException e) { + LOG.error("Unable to obtain from KeyStore :{}", e.getMessage(), e); + } catch (NoSuchAlgorithmException e) { + LOG.error("SSL algorithm is NOT available in the environment", e); + } catch (CertificateException e) { + LOG.error("Unable to obtain the requested certification ", e); + } catch (FileNotFoundException e) { + LOG.error("Unable to find the necessary SSL Keystore Files", e); + } catch (IOException e) { + LOG.error("Unable to read the necessary SSL Keystore Files", e); + } catch (UnrecoverableKeyException e) { + LOG.error("Unable to recover the key from keystore", e); + } finally { + close(in, keyStoreFile); + } + } + + return kmList; + } + + private TrustManager[] getTrustManagers() { + TrustManager[] tmList = null; + String credentialProviderPath = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL); + String trustStoreAlias = RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL_ALIAS; + String trustStoreFile = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_TRUSTSTORE_FILE); + String trustStoreFilepwd = MiscUtil.getCredentialString(credentialProviderPath, trustStoreAlias); + + if (StringUtils.isNotEmpty(trustStoreFile) && StringUtils.isNotEmpty(trustStoreFilepwd)) { + InputStream in = null; + + try { + in = getFileInputStream(trustStoreFile); + + if (in != null) { + String trustStoreType = MiscUtil.getStringProperty(props, RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE); + + trustStoreType = StringUtils.isNotEmpty(trustStoreType) ? 
trustStoreType : RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE_DEFAULT; + + KeyStore trustStore = KeyStore.getInstance(trustStoreType); + + trustStore.load(in, trustStoreFilepwd.toCharArray()); + + TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(RANGER_SSL_TRUSTMANAGER_ALGO_TYPE); + + trustManagerFactory.init(trustStore); + + tmList = trustManagerFactory.getTrustManagers(); + } else { + LOG.error("Unable to obtain truststore from file [{}]", trustStoreFile); + } + } catch (KeyStoreException e) { + LOG.error("Unable to obtain from KeyStore", e); + } catch (NoSuchAlgorithmException e) { + LOG.error("SSL algorithm is NOT available in the environment :{}", e.getMessage(), e); + } catch (CertificateException e) { + LOG.error("Unable to obtain the requested certification :{}", e.getMessage(), e); + } catch (FileNotFoundException e) { + LOG.error("Unable to find the necessary SSL TrustStore File:{}", trustStoreFile, e); + } catch (IOException e) { + LOG.error("Unable to read the necessary SSL TrustStore Files :{}", trustStoreFile, e); + } finally { + close(in, trustStoreFile); + } + } + + return tmList; + } + + private SSLContext getSSLContext(KeyManager[] kmList, TrustManager[] tmList) { + SSLContext sslContext = null; + + try { + sslContext = SSLContext.getInstance(RANGER_SSL_CONTEXT_ALGO_TYPE); + + if (sslContext != null) { + sslContext.init(kmList, tmList, new SecureRandom()); + } + } catch (NoSuchAlgorithmException e) { + LOG.error("SSL algorithm is not available in the environment", e); + } catch (KeyManagementException e) { + LOG.error("Unable to initialise the SSLContext", e); + } + + return sslContext; + } + + private UpdateResponse addDocsToSolr(final SolrClient solrClient, final Collection docs) throws Exception { + final UpdateResponse ret; + + try { + final PrivilegedExceptionAction action = () -> solrClient.add(docs); + + if (kerberosUser != null) { + // execute the privileged action as the given keytab user + final KerberosAction 
kerberosAction = new KerberosAction<>(kerberosUser, action, LOG); + + ret = (UpdateResponse) kerberosAction.execute(); + } else { + ret = action.run(); + } + } catch (Exception e) { + throw e; + } + + return ret; + } + + private InputStream getFileInputStream(String fileName) throws IOException { + InputStream in = null; + + if (StringUtils.isNotEmpty(fileName)) { + File file = new File(fileName); + + if (file.exists()) { + in = new FileInputStream(file); + } else { + in = ClassLoader.getSystemResourceAsStream(fileName); + } + } + + return in; + } + + private void close(InputStream str, String filename) { + if (str != null) { + try { + str.close(); + } catch (IOException excp) { + LOG.error("Error while closing file: [{}]", filename, excp); + } + } + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java index 84eaebec9c..1494631639 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java @@ -22,12 +22,14 @@ import java.util.Date; public abstract class AuditEventBase { + protected AuditEventBase() { + } - protected AuditEventBase() { - } + public abstract String getEventKey(); - public abstract String getEventKey(); - public abstract Date getEventTime (); - public abstract void setEventCount(long eventCount); - public abstract void setEventDurationMS(long eventDurationMS); + public abstract Date getEventTime(); + + public abstract void setEventCount(long eventCount); + + public abstract void setEventDurationMS(long eventDurationMS); } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditIndexRecord.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditIndexRecord.java index d0ea2b9087..9cb093a88a 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditIndexRecord.java +++ 
b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditIndexRecord.java @@ -22,118 +22,116 @@ import java.util.Date; public class AuditIndexRecord { - String id; - String filePath; - int linePosition = 0; - SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress; - Date fileCreateTime; - Date writeCompleteTime; - Date doneCompleteTime; - Date lastSuccessTime; - Date lastFailedTime; - int failedAttemptCount = 0; - boolean lastAttempt = false; - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getFilePath() { - return filePath; - } - - public void setFilePath(String filePath) { - this.filePath = filePath; - } - - public int getLinePosition() { - return linePosition; - } - - public void setLinePosition(int linePosition) { - this.linePosition = linePosition; - } - - public SPOOL_FILE_STATUS getStatus() { - return status; - } - - public void setStatus(SPOOL_FILE_STATUS status) { - this.status = status; - } - - public Date getFileCreateTime() { - return fileCreateTime; - } - - public void setFileCreateTime(Date fileCreateTime) { - this.fileCreateTime = fileCreateTime; - } - - public Date getWriteCompleteTime() { - return writeCompleteTime; - } - - public void setWriteCompleteTime(Date writeCompleteTime) { - this.writeCompleteTime = writeCompleteTime; - } - - public Date getDoneCompleteTime() { - return doneCompleteTime; - } - - public void setDoneCompleteTime(Date doneCompleteTime) { - this.doneCompleteTime = doneCompleteTime; - } - - public Date getLastSuccessTime() { - return lastSuccessTime; - } - - public void setLastSuccessTime(Date lastSuccessTime) { - this.lastSuccessTime = lastSuccessTime; - } - - public Date getLastFailedTime() { - return lastFailedTime; - } - - public void setLastFailedTime(Date lastFailedTime) { - this.lastFailedTime = lastFailedTime; - } - - public int getFailedAttemptCount() { - return failedAttemptCount; - } - - public void setFailedAttemptCount(int 
failedAttemptCount) { - this.failedAttemptCount = failedAttemptCount; - } - - public boolean getLastAttempt() { - return lastAttempt; - } - - public void setLastAttempt(boolean lastAttempt) { - this.lastAttempt = lastAttempt; - } - - @Override - public String toString() { - return "AuditIndexRecord [id=" + id + ", filePath=" + filePath - + ", linePosition=" + linePosition + ", status=" + status - + ", fileCreateTime=" + fileCreateTime - + ", writeCompleteTime=" + writeCompleteTime - + ", doneCompleteTime=" + doneCompleteTime - + ", lastSuccessTime=" + lastSuccessTime - + ", lastFailedTime=" + lastFailedTime - + ", failedAttemptCount=" + failedAttemptCount - + ", lastAttempt=" + lastAttempt + "]"; - } + String id; + String filePath; + int linePosition; + SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress; + Date fileCreateTime; + Date writeCompleteTime; + Date doneCompleteTime; + Date lastSuccessTime; + Date lastFailedTime; + int failedAttemptCount; + boolean lastAttempt; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getFilePath() { + return filePath; + } + + public void setFilePath(String filePath) { + this.filePath = filePath; + } + + public int getLinePosition() { + return linePosition; + } + + public void setLinePosition(int linePosition) { + this.linePosition = linePosition; + } + + public SPOOL_FILE_STATUS getStatus() { + return status; + } + + public void setStatus(SPOOL_FILE_STATUS status) { + this.status = status; + } + + public Date getFileCreateTime() { + return fileCreateTime; + } + + public void setFileCreateTime(Date fileCreateTime) { + this.fileCreateTime = fileCreateTime; + } + + public Date getWriteCompleteTime() { + return writeCompleteTime; + } + + public void setWriteCompleteTime(Date writeCompleteTime) { + this.writeCompleteTime = writeCompleteTime; + } + + public Date getDoneCompleteTime() { + return doneCompleteTime; + } + + public void setDoneCompleteTime(Date 
doneCompleteTime) { + this.doneCompleteTime = doneCompleteTime; + } + + public Date getLastSuccessTime() { + return lastSuccessTime; + } + + public void setLastSuccessTime(Date lastSuccessTime) { + this.lastSuccessTime = lastSuccessTime; + } + + public Date getLastFailedTime() { + return lastFailedTime; + } + + public void setLastFailedTime(Date lastFailedTime) { + this.lastFailedTime = lastFailedTime; + } + + public int getFailedAttemptCount() { + return failedAttemptCount; + } + + public void setFailedAttemptCount(int failedAttemptCount) { + this.failedAttemptCount = failedAttemptCount; + } + + public boolean getLastAttempt() { + return lastAttempt; + } + + public void setLastAttempt(boolean lastAttempt) { + this.lastAttempt = lastAttempt; + } + + @Override + public String toString() { + return "AuditIndexRecord [id=" + id + ", filePath=" + filePath + + ", linePosition=" + linePosition + ", status=" + status + + ", fileCreateTime=" + fileCreateTime + + ", writeCompleteTime=" + writeCompleteTime + + ", doneCompleteTime=" + doneCompleteTime + + ", lastSuccessTime=" + lastSuccessTime + + ", lastFailedTime=" + lastFailedTime + + ", failedAttemptCount=" + failedAttemptCount + + ", lastAttempt=" + lastAttempt + "]"; + } } - - diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java index 5ed88eb6ab..35ebd075e7 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java @@ -19,582 +19,557 @@ package org.apache.ranger.audit.model; -import java.util.Date; -import java.util.HashSet; -import java.util.Set; - -import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; import 
org.apache.commons.lang.StringUtils; -import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Date; +import java.util.HashSet; +import java.util.Set; @JsonSerialize public class AuthzAuditEvent extends AuditEventBase { - protected static String FIELD_SEPARATOR = ";"; + protected static final int MAX_ACTION_FIELD_SIZE = 1800; + protected static final int MAX_REQUEST_DATA_FIELD_SIZE = 1800; + protected static String FIELD_SEPARATOR = ";"; - protected static final int MAX_ACTION_FIELD_SIZE = 1800; - protected static final int MAX_REQUEST_DATA_FIELD_SIZE = 1800; + @JsonProperty("repoType") + protected int repositoryType; - @JsonProperty("repoType") - protected int repositoryType = 0; + @JsonProperty("repo") + protected String repositoryName; - @JsonProperty("repo") - protected String repositoryName = null; + @JsonProperty("reqUser") + protected String user; - @JsonProperty("reqUser") - protected String user = null; + @JsonProperty("evtTime") + protected Date eventTime = new Date(); - @JsonProperty("evtTime") - protected Date eventTime = new Date(); + @JsonProperty("access") + protected String accessType; - @JsonProperty("access") - protected String accessType = null; + @JsonProperty("resource") + protected String resourcePath; - @JsonProperty("resource") - protected String resourcePath = null; + @JsonProperty("resType") + protected String resourceType; - @JsonProperty("resType") - protected String resourceType = null; + @JsonProperty("action") + protected String action; - @JsonProperty("action") - protected String action = null; + @JsonProperty("result") + protected short accessResult; // 0 - DENIED; 1 - ALLOWED; HTTP return code - @JsonProperty("result") - protected short accessResult = 0; // 0 - DENIED; 1 - ALLOWED; HTTP return - // code + @JsonProperty("agent") + protected String agentId; - @JsonProperty("agent") - protected String agentId = null; + @JsonProperty("policy") + protected long policyId; - @JsonProperty("policy") - protected long policyId 
= 0; + @JsonProperty("reason") + protected String resultReason; - @JsonProperty("reason") - protected String resultReason = null; + @JsonProperty("enforcer") + protected String aclEnforcer; - @JsonProperty("enforcer") - protected String aclEnforcer = null; + @JsonProperty("sess") + protected String sessionId; - @JsonProperty("sess") - protected String sessionId = null; - - @JsonProperty("cliType") - protected String clientType = null; - - @JsonProperty("cliIP") - protected String clientIP = null; - - @JsonProperty("reqData") - protected String requestData = null; - - @JsonProperty("agentHost") - protected String agentHostname = null; - - @JsonProperty("logType") - protected String logType = null; - - @JsonProperty("id") - protected String eventId = null; - - /** - * This to ensure order within a session. Order not guaranteed across - * processes and hosts - */ - @JsonProperty("seq_num") - protected long seqNum = 0; - - @JsonProperty("event_count") - protected long eventCount = 1; - - @JsonProperty("event_dur_ms") - protected long eventDurationMS = 0; - - @JsonProperty("tags") - protected Set tags = new HashSet<>(); - - @JsonProperty("datasets") - protected Set datasets = null; - - @JsonProperty("projects") - protected Set projects = null; - - @JsonProperty("additional_info") - protected String additionalInfo; - - @JsonProperty("cluster_name") - protected String clusterName; - - @JsonProperty("zone_name") - protected String zoneName; - - @JsonProperty("policy_version") - protected Long policyVersion; - - public AuthzAuditEvent() { - super(); - - this.repositoryType = 0; - } - - public AuthzAuditEvent(int repositoryType, String repositoryName, - String user, Date eventTime, String accessType, - String resourcePath, String resourceType, String action, - short accessResult, String agentId, long policyId, - String resultReason, String aclEnforcer, String sessionId, - String clientType, String clientIP, String requestData, String clusterName) { - this(repositoryType, 
repositoryName, user, eventTime, accessType, resourcePath, resourceType, action, accessResult, agentId, - policyId, resultReason, aclEnforcer, sessionId, clientType, clientIP, requestData, clusterName, null); - } - - public AuthzAuditEvent(int repositoryType, String repositoryName, - String user, Date eventTime, String accessType, - String resourcePath, String resourceType, String action, - short accessResult, String agentId, long policyId, - String resultReason, String aclEnforcer, String sessionId, - String clientType, String clientIP, String requestData, String clusterName, String zoneName) { - this(repositoryType, repositoryName, user, eventTime, accessType, resourcePath, resourceType, action, accessResult, agentId, - policyId, resultReason, aclEnforcer, sessionId, clientType, clientIP, requestData, clusterName, zoneName, null); - - } - - public AuthzAuditEvent(int repositoryType, String repositoryName, - String user, Date eventTime, String accessType, - String resourcePath, String resourceType, String action, - short accessResult, String agentId, long policyId, - String resultReason, String aclEnforcer, String sessionId, - String clientType, String clientIP, String requestData, String clusterName, String zoneName, Long policyVersion) { - this.repositoryType = repositoryType; - this.repositoryName = repositoryName; - this.user = user; - this.eventTime = eventTime; - this.accessType = accessType; - this.resourcePath = resourcePath; - this.resourceType = resourceType; - this.action = action; - this.accessResult = accessResult; - this.agentId = agentId; - this.policyId = policyId; - this.resultReason = resultReason; - this.aclEnforcer = aclEnforcer; - this.sessionId = sessionId; - this.clientType = clientType; - this.clientIP = clientIP; - this.requestData = requestData; - this.clusterName = clusterName; - this.zoneName = zoneName; - this.policyVersion = policyVersion; - } - - /** - * @return the repositoryType - */ - public int getRepositoryType() { - return 
repositoryType; - } - - /** - * @param repositoryType - * the repositoryType to set - */ - public void setRepositoryType(int repositoryType) { - this.repositoryType = repositoryType; - } - - /** - * @return the repositoryName - */ - public String getRepositoryName() { - return repositoryName; - } - - /** - * @param repositoryName - * the repositoryName to set - */ - public void setRepositoryName(String repositoryName) { - this.repositoryName = repositoryName; - } - - /** - * @return the user - */ - public String getUser() { - return user; - } - - /** - * @param user - * the user to set - */ - public void setUser(String user) { - this.user = user; - } - - /** - * @return the timeStamp - */ - public Date getEventTime() { - return eventTime; - } - - /** - * @param eventTime - * the eventTime to set - */ - public void setEventTime(Date eventTime) { - this.eventTime = eventTime; - } - - /** - * @return the accessType - */ - public String getAccessType() { - return accessType; - } - - /** - * @param accessType - * the accessType to set - */ - public void setAccessType(String accessType) { - this.accessType = accessType; - } - - /** - * @return the resourcePath - */ - public String getResourcePath() { - return resourcePath; - } - - /** - * @param resourcePath - * the resourcePath to set - */ - public void setResourcePath(String resourcePath) { - this.resourcePath = resourcePath; - } - - /** - * @return the resourceType - */ - public String getResourceType() { - return resourceType; - } - - /** - * @param resourceType - * the resourceType to set - */ - public void setResourceType(String resourceType) { - this.resourceType = resourceType; - } - - /** - * @return the action - */ - public String getAction() { return action; } - - /** - * @param action - * the action to set - */ - public void setAction(String action) { - this.action = action; - } - - /** - * @return the accessResult - */ - public short getAccessResult() { - return accessResult; - } - - /** - * @param 
accessResult - * the accessResult to set - */ - public void setAccessResult(short accessResult) { - this.accessResult = accessResult; - } - - /** - * @return the agentId - */ - public String getAgentId() { - return agentId; - } - - /** - * @param agentId - * the agentId to set - */ - public void setAgentId(String agentId) { - this.agentId = agentId; - } - - /** - * @return the policyId - */ - public long getPolicyId() { - return policyId; - } - - /** - * @param policyId - * the policyId to set - */ - public void setPolicyId(long policyId) { - this.policyId = policyId; - } - - /** - * @return the resultReason - */ - public String getResultReason() { - return resultReason; - } - - /** - * @param resultReason - * the resultReason to set - */ - public void setResultReason(String resultReason) { - this.resultReason = resultReason; - } - - /** - * @return the aclEnforcer - */ - public String getAclEnforcer() { - return aclEnforcer; - } - - /** - * @param aclEnforcer - * the aclEnforcer to set - */ - public void setAclEnforcer(String aclEnforcer) { - this.aclEnforcer = aclEnforcer; - } - - /** - * @return the sessionId - */ - public String getSessionId() { - return sessionId; - } - - /** - * @param sessionId - * the sessionId to set - */ - public void setSessionId(String sessionId) { - this.sessionId = sessionId; - } - - /** - * @return the clientType - */ - public String getClientType() { - return clientType; - } - - /** - * @param clientType - * the clientType to set - */ - public void setClientType(String clientType) { - this.clientType = clientType; - } - - /** - * @return the clientIP - */ - public String getClientIP() { - return clientIP; - } - - /** - * @param clientIP - * the clientIP to set - */ - public void setClientIP(String clientIP) { - this.clientIP = clientIP; - } - - /** - * @return the requestData - */ - public String getRequestData() { return requestData; } - - /** - * @param requestData - * the requestData to set - */ - public void 
setRequestData(String requestData) { - this.requestData = requestData; - } - - public String getAgentHostname() { - return agentHostname; - } - - public void setAgentHostname(String agentHostname) { - this.agentHostname = agentHostname; - } - - public String getLogType() { - return logType; - } - - public void setLogType(String logType) { - this.logType = logType; - } - - public String getEventId() { - return eventId; - } - - public void setEventId(String eventId) { - this.eventId = eventId; - } - - public long getSeqNum() { - return seqNum; - } - - public void setSeqNum(long seqNum) { - this.seqNum = seqNum; - } - - public long getEventCount() { - return eventCount; - } - - public void setEventCount(long frequencyCount) { - this.eventCount = frequencyCount; - } - - public long getEventDurationMS() { - return eventDurationMS; - } - - public void setEventDurationMS(long frequencyDurationMS) { - this.eventDurationMS = frequencyDurationMS; - } - - public Set getTags() { - return tags; - } - - public void setTags(Set tags) { - this.tags = tags; - } - - public Set getDatasets() { - return datasets; - } - - public void setDatasets(Set datasets) { - this.datasets = datasets; - } - - public Set getProjects() { - return projects; - } - - public void setProjects(Set projects) { - this.projects = projects; - } - - public String getClusterName() { - return clusterName; - } - - public void setZoneName(String zoneName) { - this.zoneName = zoneName; - } - - public String getZoneName() { - return zoneName; - } - - public void setPolicyVersion(Long policyVersion) { - this.policyVersion = policyVersion; - } - - public Long getPolicyVersion() { - return policyVersion; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getAdditionalInfo() { return this.additionalInfo; } - - public void setAdditionalInfo(String additionalInfo) { this.additionalInfo = additionalInfo; } - - @JsonIgnore - @Override - public String getEventKey() 
{ - String key = user + "^" + accessType + "^" + resourcePath + "^" - + resourceType + "^" + action + "^" + accessResult + "^" - + sessionId + "^" + clientIP; - return key; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - - sb.append("AuthzAuditEvent{"); - toString(sb); - sb.append("}"); - - return sb.toString(); - } - - protected StringBuilder toString(StringBuilder sb) { - sb.append("repositoryType=").append(repositoryType) - .append(FIELD_SEPARATOR).append("repositoryName=") - .append(repositoryName).append(FIELD_SEPARATOR).append("user=") - .append(user).append(FIELD_SEPARATOR).append("eventTime=") - .append(eventTime).append(FIELD_SEPARATOR) - .append("accessType=").append(accessType) - .append(FIELD_SEPARATOR).append("resourcePath=") - .append(resourcePath).append(FIELD_SEPARATOR) - .append("resourceType=").append(resourceType) - .append(FIELD_SEPARATOR).append("action=").append(action) - .append(FIELD_SEPARATOR).append("accessResult=") - .append(accessResult).append(FIELD_SEPARATOR) - .append("agentId=").append(agentId).append(FIELD_SEPARATOR) - .append("policyId=").append(policyId).append(FIELD_SEPARATOR) - .append("resultReason=").append(resultReason) - .append(FIELD_SEPARATOR).append("aclEnforcer=") - .append(aclEnforcer).append(FIELD_SEPARATOR) - .append("sessionId=").append(sessionId).append(FIELD_SEPARATOR) - .append("clientType=").append(clientType) - .append(FIELD_SEPARATOR).append("clientIP=").append(clientIP) - .append(FIELD_SEPARATOR).append("requestData=") - .append(requestData).append(FIELD_SEPARATOR) - .append("agentHostname=").append(agentHostname) - .append(FIELD_SEPARATOR).append("logType=").append(logType) - .append(FIELD_SEPARATOR).append("eventId=").append(eventId) - .append(FIELD_SEPARATOR).append("seq_num=").append(seqNum) - .append(FIELD_SEPARATOR).append("event_count=") - .append(eventCount).append(FIELD_SEPARATOR) - .append("event_dur_ms=").append(eventDurationMS) - 
.append(FIELD_SEPARATOR).append("tags=").append("[").append(StringUtils.join(tags, ", ")).append("]") - .append(FIELD_SEPARATOR).append("datasets=").append("[").append(datasets != null ? StringUtils.join(datasets, ", ") : "").append("]") - .append(FIELD_SEPARATOR).append("projects=").append("[").append(projects != null ? StringUtils.join(projects, ", ") : "").append("]") - .append(FIELD_SEPARATOR).append("clusterName=").append(clusterName) - .append(FIELD_SEPARATOR).append("zoneName=").append(zoneName) - .append(FIELD_SEPARATOR).append("policyVersion=").append(policyVersion) - .append(FIELD_SEPARATOR).append("additionalInfo=").append(additionalInfo); - - return sb; - } + @JsonProperty("cliType") + protected String clientType; + + @JsonProperty("cliIP") + protected String clientIP; + + @JsonProperty("reqData") + protected String requestData; + + @JsonProperty("agentHost") + protected String agentHostname; + + @JsonProperty("logType") + protected String logType; + + @JsonProperty("id") + protected String eventId; + + /** + * This to ensure order within a session. 
Order not guaranteed across + * processes and hosts + */ + @JsonProperty("seq_num") + protected long seqNum; + + @JsonProperty("event_count") + protected long eventCount = 1; + + @JsonProperty("event_dur_ms") + protected long eventDurationMS; + + @JsonProperty("tags") + protected Set tags = new HashSet<>(); + + @JsonProperty("datasets") + protected Set datasets; + + @JsonProperty("projects") + protected Set projects; + + @JsonProperty("additional_info") + protected String additionalInfo; + + @JsonProperty("cluster_name") + protected String clusterName; + + @JsonProperty("zone_name") + protected String zoneName; + + @JsonProperty("policy_version") + protected Long policyVersion; + + public AuthzAuditEvent() { + super(); + + this.repositoryType = 0; + } + + public AuthzAuditEvent(int repositoryType, String repositoryName, String user, Date eventTime, String accessType, + String resourcePath, String resourceType, String action, short accessResult, String agentId, long policyId, + String resultReason, String aclEnforcer, String sessionId, String clientType, String clientIP, String requestData, String clusterName) { + this(repositoryType, repositoryName, user, eventTime, accessType, resourcePath, resourceType, action, accessResult, agentId, + policyId, resultReason, aclEnforcer, sessionId, clientType, clientIP, requestData, clusterName, null); + } + + public AuthzAuditEvent(int repositoryType, String repositoryName, String user, Date eventTime, String accessType, + String resourcePath, String resourceType, String action, short accessResult, String agentId, long policyId, + String resultReason, String aclEnforcer, String sessionId, String clientType, String clientIP, String requestData, String clusterName, String zoneName) { + this(repositoryType, repositoryName, user, eventTime, accessType, resourcePath, resourceType, action, accessResult, agentId, + policyId, resultReason, aclEnforcer, sessionId, clientType, clientIP, requestData, clusterName, zoneName, null); + } + + 
public AuthzAuditEvent(int repositoryType, String repositoryName, String user, Date eventTime, String accessType, + String resourcePath, String resourceType, String action, short accessResult, String agentId, long policyId, + String resultReason, String aclEnforcer, String sessionId, String clientType, String clientIP, String requestData, String clusterName, String zoneName, Long policyVersion) { + this.repositoryType = repositoryType; + this.repositoryName = repositoryName; + this.user = user; + this.eventTime = eventTime; + this.accessType = accessType; + this.resourcePath = resourcePath; + this.resourceType = resourceType; + this.action = action; + this.accessResult = accessResult; + this.agentId = agentId; + this.policyId = policyId; + this.resultReason = resultReason; + this.aclEnforcer = aclEnforcer; + this.sessionId = sessionId; + this.clientType = clientType; + this.clientIP = clientIP; + this.requestData = requestData; + this.clusterName = clusterName; + this.zoneName = zoneName; + this.policyVersion = policyVersion; + } + + /** + * @return the repositoryType + */ + public int getRepositoryType() { + return repositoryType; + } + + /** + * @param repositoryType the repositoryType to set + */ + public void setRepositoryType(int repositoryType) { + this.repositoryType = repositoryType; + } + + /** + * @return the repositoryName + */ + public String getRepositoryName() { + return repositoryName; + } + + /** + * @param repositoryName the repositoryName to set + */ + public void setRepositoryName(String repositoryName) { + this.repositoryName = repositoryName; + } + + /** + * @return the user + */ + public String getUser() { + return user; + } + + /** + * @param user the user to set + */ + public void setUser(String user) { + this.user = user; + } + + /** + * @return the accessType + */ + public String getAccessType() { + return accessType; + } + + /** + * @param accessType the accessType to set + */ + public void setAccessType(String accessType) { + 
this.accessType = accessType; + } + + /** + * @return the resourcePath + */ + public String getResourcePath() { + return resourcePath; + } + + /** + * @param resourcePath the resourcePath to set + */ + public void setResourcePath(String resourcePath) { + this.resourcePath = resourcePath; + } + + /** + * @return the resourceType + */ + public String getResourceType() { + return resourceType; + } + + /** + * @param resourceType the resourceType to set + */ + public void setResourceType(String resourceType) { + this.resourceType = resourceType; + } + + /** + * @return the action + */ + public String getAction() { + return action; + } + + /** + * @param action the action to set + */ + public void setAction(String action) { + this.action = action; + } + + /** + * @return the accessResult + */ + public short getAccessResult() { + return accessResult; + } + + /** + * @param accessResult the accessResult to set + */ + public void setAccessResult(short accessResult) { + this.accessResult = accessResult; + } + + /** + * @return the agentId + */ + public String getAgentId() { + return agentId; + } + + /** + * @param agentId the agentId to set + */ + public void setAgentId(String agentId) { + this.agentId = agentId; + } + + /** + * @return the policyId + */ + public long getPolicyId() { + return policyId; + } + + /** + * @param policyId the policyId to set + */ + public void setPolicyId(long policyId) { + this.policyId = policyId; + } + + /** + * @return the resultReason + */ + public String getResultReason() { + return resultReason; + } + + /** + * @param resultReason the resultReason to set + */ + public void setResultReason(String resultReason) { + this.resultReason = resultReason; + } + + /** + * @return the aclEnforcer + */ + public String getAclEnforcer() { + return aclEnforcer; + } + + /** + * @param aclEnforcer the aclEnforcer to set + */ + public void setAclEnforcer(String aclEnforcer) { + this.aclEnforcer = aclEnforcer; + } + + /** + * @return the sessionId + */ + 
public String getSessionId() { + return sessionId; + } + + /** + * @param sessionId the sessionId to set + */ + public void setSessionId(String sessionId) { + this.sessionId = sessionId; + } + + /** + * @return the clientType + */ + public String getClientType() { + return clientType; + } + + /** + * @param clientType the clientType to set + */ + public void setClientType(String clientType) { + this.clientType = clientType; + } + + /** + * @return the clientIP + */ + public String getClientIP() { + return clientIP; + } + + /** + * @param clientIP the clientIP to set + */ + public void setClientIP(String clientIP) { + this.clientIP = clientIP; + } + + /** + * @return the requestData + */ + public String getRequestData() { + return requestData; + } + + /** + * @param requestData the requestData to set + */ + public void setRequestData(String requestData) { + this.requestData = requestData; + } + + public String getAgentHostname() { + return agentHostname; + } + + public void setAgentHostname(String agentHostname) { + this.agentHostname = agentHostname; + } + + public String getLogType() { + return logType; + } + + public void setLogType(String logType) { + this.logType = logType; + } + + public String getEventId() { + return eventId; + } + + public void setEventId(String eventId) { + this.eventId = eventId; + } + + public long getSeqNum() { + return seqNum; + } + + public void setSeqNum(long seqNum) { + this.seqNum = seqNum; + } + + public long getEventCount() { + return eventCount; + } + + public void setEventCount(long frequencyCount) { + this.eventCount = frequencyCount; + } + + public long getEventDurationMS() { + return eventDurationMS; + } + + public void setEventDurationMS(long frequencyDurationMS) { + this.eventDurationMS = frequencyDurationMS; + } + + public Set getTags() { + return tags; + } + + public void setTags(Set tags) { + this.tags = tags; + } + + public Set getDatasets() { + return datasets; + } + + public void setDatasets(Set datasets) { + 
this.datasets = datasets; + } + + public Set getProjects() { + return projects; + } + + public void setProjects(Set projects) { + this.projects = projects; + } + + public String getClusterName() { + return clusterName; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public String getZoneName() { + return zoneName; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public Long getPolicyVersion() { + return policyVersion; + } + + public void setPolicyVersion(Long policyVersion) { + this.policyVersion = policyVersion; + } + + public String getAdditionalInfo() { + return this.additionalInfo; + } + + public void setAdditionalInfo(String additionalInfo) { + this.additionalInfo = additionalInfo; + } + + @JsonIgnore + @Override + public String getEventKey() { + return user + "^" + accessType + "^" + resourcePath + "^" + resourceType + "^" + action + "^" + accessResult + "^" + sessionId + "^" + clientIP; + } + + /** + * @return the timeStamp + */ + public Date getEventTime() { + return eventTime; + } + + /** + * @param eventTime the eventTime to set + */ + public void setEventTime(Date eventTime) { + this.eventTime = eventTime; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("AuthzAuditEvent{"); + toString(sb); + sb.append("}"); + + return sb.toString(); + } + + protected StringBuilder toString(StringBuilder sb) { + sb.append("repositoryType=").append(repositoryType) + .append(FIELD_SEPARATOR).append("repositoryName=") + .append(repositoryName).append(FIELD_SEPARATOR).append("user=") + .append(user).append(FIELD_SEPARATOR).append("eventTime=") + .append(eventTime).append(FIELD_SEPARATOR) + .append("accessType=").append(accessType) + .append(FIELD_SEPARATOR).append("resourcePath=") + .append(resourcePath).append(FIELD_SEPARATOR) + .append("resourceType=").append(resourceType) + .append(FIELD_SEPARATOR).append("action=").append(action) 
+ .append(FIELD_SEPARATOR).append("accessResult=") + .append(accessResult).append(FIELD_SEPARATOR) + .append("agentId=").append(agentId).append(FIELD_SEPARATOR) + .append("policyId=").append(policyId).append(FIELD_SEPARATOR) + .append("resultReason=").append(resultReason) + .append(FIELD_SEPARATOR).append("aclEnforcer=") + .append(aclEnforcer).append(FIELD_SEPARATOR) + .append("sessionId=").append(sessionId).append(FIELD_SEPARATOR) + .append("clientType=").append(clientType) + .append(FIELD_SEPARATOR).append("clientIP=").append(clientIP) + .append(FIELD_SEPARATOR).append("requestData=") + .append(requestData).append(FIELD_SEPARATOR) + .append("agentHostname=").append(agentHostname) + .append(FIELD_SEPARATOR).append("logType=").append(logType) + .append(FIELD_SEPARATOR).append("eventId=").append(eventId) + .append(FIELD_SEPARATOR).append("seq_num=").append(seqNum) + .append(FIELD_SEPARATOR).append("event_count=") + .append(eventCount).append(FIELD_SEPARATOR) + .append("event_dur_ms=").append(eventDurationMS) + .append(FIELD_SEPARATOR).append("tags=").append("[").append(StringUtils.join(tags, ", ")).append("]") + .append(FIELD_SEPARATOR).append("datasets=").append("[").append(datasets != null ? StringUtils.join(datasets, ", ") : "").append("]") + .append(FIELD_SEPARATOR).append("projects=").append("[").append(projects != null ? 
StringUtils.join(projects, ", ") : "").append("]") + .append(FIELD_SEPARATOR).append("clusterName=").append(clusterName) + .append(FIELD_SEPARATOR).append("zoneName=").append(zoneName) + .append(FIELD_SEPARATOR).append("policyVersion=").append(policyVersion) + .append(FIELD_SEPARATOR).append("additionalInfo=").append(additionalInfo); + + return sb; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/EnumRepositoryType.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/EnumRepositoryType.java index a8364c2015..28b30f8688 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/model/EnumRepositoryType.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/EnumRepositoryType.java @@ -17,21 +17,22 @@ * under the License. */ - package org.apache.ranger.audit.model; +package org.apache.ranger.audit.model; public final class EnumRepositoryType { - - public static final int HDFS = 1; - - public static final int HBASE = 2; - - public static final int HIVE = 3; - - public static final int XAAGENT = 4; - - public static final int KNOX = 5; - - public static final int STORM = 6; - - + public static final int HDFS = 1; + + public static final int HBASE = 2; + + public static final int HIVE = 3; + + public static final int XAAGENT = 4; + + public static final int KNOX = 5; + + public static final int STORM = 6; + + private EnumRepositoryType() { + // to block instantiation + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/SPOOL_FILE_STATUS.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/SPOOL_FILE_STATUS.java index 3765c9c923..5156cf27be 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/model/SPOOL_FILE_STATUS.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/SPOOL_FILE_STATUS.java @@ -20,5 +20,5 @@ package org.apache.ranger.audit.model; public enum SPOOL_FILE_STATUS { - pending, write_inprogress, read_inprogress, done + pending, write_inprogress, 
read_inprogress, done } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java index 2a3ec77b6a..ea3d4f7ad9 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java @@ -17,273 +17,273 @@ * under the License. */ - package org.apache.ranger.audit.provider; +package org.apache.ranger.audit.provider; -import java.util.concurrent.BlockingQueue; +import org.apache.ranger.audit.model.AuditEventBase; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Properties; import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import java.util.Properties; -import org.apache.ranger.audit.model.AuditEventBase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +public class AsyncAuditProvider extends MultiDestAuditProvider implements Runnable { + private static final Logger LOG = LoggerFactory.getLogger(AsyncAuditProvider.class); + + private static final int mStopLoopIntervalSecs = 1; // 1 second + private static final int mWaitToCompleteLoopIntervalSecs = 1; // 1 second + private static int sThreadCount; + + private final BlockingQueue mQueue; + private final String mName; + + // Summary of logs handled + private final AtomicLong lifeTimeInLogCount = new AtomicLong(0); // Total count, including drop count + private final AtomicLong lifeTimeOutLogCount = new AtomicLong(0); + private final AtomicLong lifeTimeDropCount = new AtomicLong(0); + private final AtomicLong intervalInLogCount = new AtomicLong(0); + private final AtomicLong intervalOutLogCount = new AtomicLong(0); + private final AtomicLong intervalDropCount = new AtomicLong(0); + + private Thread mThread; + private int 
mMaxQueueSize = 10 * 1024; + private int mMaxFlushInterval = 5000; // 5 seconds + private long lastIntervalLogTime = System.currentTimeMillis(); + private int intervalLogDurationMS = 60000; + private long lastFlushTime = System.currentTimeMillis(); + + public AsyncAuditProvider(String name, int maxQueueSize, int maxFlushInterval) { + LOG.info("AsyncAuditProvider({}): creating..", name); + + if (maxQueueSize < 1) { + LOG.warn("AsyncAuditProvider({}): invalid maxQueueSize={}. will use default {}", name, maxQueueSize, mMaxQueueSize); + + maxQueueSize = mMaxQueueSize; + } + + mName = name; + mMaxQueueSize = maxQueueSize; + mMaxFlushInterval = maxFlushInterval; + + mQueue = new ArrayBlockingQueue<>(mMaxQueueSize); + } + + public AsyncAuditProvider(String name, int maxQueueSize, int maxFlushInterval, AuditHandler provider) { + this(name, maxQueueSize, maxFlushInterval); + + addAuditProvider(provider); + } + + @Override + public void init(Properties props) { + LOG.info("AsyncAuditProvider({}).init()", mName); + + super.init(props); + } + + @Override + public boolean log(AuditEventBase event) { + LOG.debug("AsyncAuditProvider.logEvent(AuditEventBase)"); + + queueEvent(event); + return true; + } + + @Override + public void start() { + mThread = new Thread(this, "AsyncAuditProvider" + (++sThreadCount)); + + mThread.setDaemon(true); + mThread.start(); + + super.start(); + } + + @Override + public void stop() { + LOG.info("==> AsyncAuditProvider.stop()"); + + try { + LOG.info("Interrupting child thread of {}...", mName); + + mThread.interrupt(); + + while (mThread.isAlive()) { + try { + LOG.info("Waiting for child thread of {} to exit. Sleeping for {} secs", mName, mStopLoopIntervalSecs); + + mThread.join(mStopLoopIntervalSecs * 1000L); + } catch (InterruptedException e) { + LOG.warn("Interrupted while waiting for child thread to join! 
Proceeding with stop", e); + break; + } + } + + super.stop(); + } finally { + LOG.info("<== AsyncAuditProvider.stop()"); + } + } + + @Override + public void waitToComplete() { + waitToComplete(0); + + super.waitToComplete(); + } + + public void waitToComplete(long maxWaitSeconds) { + LOG.debug("==> AsyncAuditProvider.waitToComplete()"); + + try { + for (long waitTime = 0; !isEmpty() && (maxWaitSeconds <= 0 || maxWaitSeconds > waitTime); waitTime += mWaitToCompleteLoopIntervalSecs) { + try { + LOG.info("{} messages yet to be flushed by {}. Sleeoping for {} sec", mQueue.size(), mName, mWaitToCompleteLoopIntervalSecs); + + Thread.sleep(mWaitToCompleteLoopIntervalSecs * 1000L); + } catch (InterruptedException excp) { + // someone really wants service to exit, abandon unwritten audits and exit. + LOG.warn("Caught interrupted exception! {} messages still unflushed! Won't wait for queue to flush, exiting...", mQueue.size(), excp); + + break; + } + } + } finally { + LOG.debug("<== AsyncAuditProvider.waitToComplete()"); + } + } + + public int getIntervalLogDurationMS() { + return intervalLogDurationMS; + } + + public void setIntervalLogDurationMS(int intervalLogDurationMS) { + this.intervalLogDurationMS = intervalLogDurationMS; + } + + @Override + public void run() { + LOG.info("==> AsyncAuditProvider.run()"); + + while (true) { + AuditEventBase event = null; + try { + event = dequeueEvent(); + + if (event != null) { + super.log(event); + } else { + lastFlushTime = System.currentTimeMillis(); + + flush(); + } + } catch (InterruptedException excp) { + LOG.info("AsyncAuditProvider.run - Interrupted! 
Breaking out of while loop."); + + break; + } catch (Exception excp) { + logFailedEvent(event, excp); + } + } + + try { + lastFlushTime = System.currentTimeMillis(); + + flush(); + } catch (Exception excp) { + LOG.error("AsyncAuditProvider.run()", excp); + } + + LOG.info("<== AsyncAuditProvider.run()"); + } + + private void queueEvent(AuditEventBase event) { + // Increase counts + lifeTimeInLogCount.incrementAndGet(); + intervalInLogCount.incrementAndGet(); + + if (!mQueue.offer(event)) { + lifeTimeDropCount.incrementAndGet(); + intervalDropCount.incrementAndGet(); + } + } + + private AuditEventBase dequeueEvent() throws InterruptedException { + AuditEventBase ret = mQueue.poll(); + + while (ret == null) { + logSummaryIfRequired(); + + if (mMaxFlushInterval > 0) { + long timeTillNextFlush = getTimeTillNextFlush(); + + if (timeTillNextFlush <= 0) { + break; // force flush + } + + ret = mQueue.poll(timeTillNextFlush, TimeUnit.MILLISECONDS); + } else { + // Let's wake up for summary logging + long waitTime = intervalLogDurationMS - (System.currentTimeMillis() - lastIntervalLogTime); + + if (waitTime <= 0) { + waitTime = intervalLogDurationMS; + } + + ret = mQueue.poll(waitTime, TimeUnit.MILLISECONDS); + } + } + + if (ret != null) { + lifeTimeOutLogCount.incrementAndGet(); + intervalOutLogCount.incrementAndGet(); + } + + logSummaryIfRequired(); + + return ret; + } + + private void logSummaryIfRequired() { + long intervalSinceLastLog = System.currentTimeMillis() - lastIntervalLogTime; + + if (intervalSinceLastLog > intervalLogDurationMS) { + if (intervalInLogCount.get() > 0 || intervalOutLogCount.get() > 0) { + long queueSize = mQueue.size(); + + LOG.info("AsyncAuditProvider-stats:{}: past {}: inLogs={}, outLogs={}, dropped={}, currentQueueSize={}", mName, formatIntervalForLog(intervalSinceLastLog), intervalInLogCount, intervalOutLogCount, intervalDropCount, queueSize); + LOG.info("AsyncAuditProvider-stats:{}: process lifetime: inLogs={}, outLogs={}, dropped={}", mName, 
lifeTimeInLogCount, lifeTimeOutLogCount, lifeTimeDropCount); + } + + lastIntervalLogTime = System.currentTimeMillis(); + + intervalInLogCount.set(0); + intervalOutLogCount.set(0); + intervalDropCount.set(0); + } + } + + private boolean isEmpty() { + return mQueue.isEmpty(); + } + + private long getTimeTillNextFlush() { + long timeTillNextFlush = mMaxFlushInterval; + + if (mMaxFlushInterval > 0) { + if (lastFlushTime != 0) { + long timeSinceLastFlush = System.currentTimeMillis() - lastFlushTime; + + if (timeSinceLastFlush >= mMaxFlushInterval) { + timeTillNextFlush = 0; + } else { + timeTillNextFlush = mMaxFlushInterval - timeSinceLastFlush; + } + } + } -public class AsyncAuditProvider extends MultiDestAuditProvider implements - Runnable { - - private static final Logger LOG = LoggerFactory.getLogger(AsyncAuditProvider.class); - - private static int sThreadCount = 0; - - private BlockingQueue mQueue = null; - private Thread mThread = null; - private String mName = null; - private int mMaxQueueSize = 10 * 1024; - private int mMaxFlushInterval = 5000; // 5 seconds - - private static final int mStopLoopIntervalSecs = 1; // 1 second - private static final int mWaitToCompleteLoopIntervalSecs = 1; // 1 second - - // Summary of logs handled - private AtomicLong lifeTimeInLogCount = new AtomicLong(0); // Total count, including drop count - private AtomicLong lifeTimeOutLogCount = new AtomicLong(0); - private AtomicLong lifeTimeDropCount = new AtomicLong(0); - private AtomicLong intervalInLogCount = new AtomicLong(0); - private AtomicLong intervalOutLogCount = new AtomicLong(0); - private AtomicLong intervalDropCount = new AtomicLong(0); - private long lastIntervalLogTime = System.currentTimeMillis(); - private int intervalLogDurationMS = 60000; - private long lastFlushTime = System.currentTimeMillis(); - - public AsyncAuditProvider(String name, int maxQueueSize, int maxFlushInterval) { - LOG.info("AsyncAuditProvider(" + name + "): creating.."); - - if(maxQueueSize < 1) { - 
LOG.warn("AsyncAuditProvider(" + name + "): invalid maxQueueSize=" + maxQueueSize + ". will use default " + mMaxQueueSize); - - maxQueueSize = mMaxQueueSize; - } - - mName = name; - mMaxQueueSize = maxQueueSize; - mMaxFlushInterval = maxFlushInterval; - - mQueue = new ArrayBlockingQueue(mMaxQueueSize); - } - - public AsyncAuditProvider(String name, int maxQueueSize, int maxFlushInterval, AuditHandler provider) { - this(name, maxQueueSize, maxFlushInterval); - - addAuditProvider(provider); - } - - @Override - public void init(Properties props) { - LOG.info("AsyncAuditProvider(" + mName + ").init()"); - - super.init(props); - } - - public int getIntervalLogDurationMS() { - return intervalLogDurationMS; - } - - public void setIntervalLogDurationMS(int intervalLogDurationMS) { - this.intervalLogDurationMS = intervalLogDurationMS; - } - - @Override - public boolean log(AuditEventBase event) { - LOG.debug("AsyncAuditProvider.logEvent(AuditEventBase)"); - - queueEvent(event); - return true; - } - - @Override - public void start() { - mThread = new Thread(this, "AsyncAuditProvider" + (++sThreadCount)); - - mThread.setDaemon(true); - mThread.start(); - - super.start(); - } - - @Override - public void stop() { - LOG.info("==> AsyncAuditProvider.stop()"); - try { - LOG.info("Interrupting child thread of " + mName + "..." ); - mThread.interrupt(); - while (mThread.isAlive()) { - try { - LOG.info(String.format("Waiting for child thread of %s to exit. Sleeping for %d secs", mName, mStopLoopIntervalSecs)); - mThread.join(mStopLoopIntervalSecs * 1000L); - } catch (InterruptedException e) { - LOG.warn("Interrupted while waiting for child thread to join! 
Proceeding with stop", e); - break; - } - } - - super.stop(); - } finally { - LOG.info("<== AsyncAuditProvider.stop()"); - } - } - - @Override - public void waitToComplete() { - waitToComplete(0); - - super.waitToComplete(); - } - - @Override - public void run() { - LOG.info("==> AsyncAuditProvider.run()"); - - while (true) { - AuditEventBase event = null; - try { - event = dequeueEvent(); - - if (event != null) { - super.log(event); - } else { - lastFlushTime = System.currentTimeMillis(); - flush(); - } - } catch (InterruptedException excp) { - LOG.info("AsyncAuditProvider.run - Interrupted! Breaking out of while loop."); - break; - } catch (Exception excp) { - logFailedEvent(event, excp); - } - } - - try { - lastFlushTime = System.currentTimeMillis(); - flush(); - } catch (Exception excp) { - LOG.error("AsyncAuditProvider.run()", excp); - } - - LOG.info("<== AsyncAuditProvider.run()"); - } - - private void queueEvent(AuditEventBase event) { - // Increase counts - lifeTimeInLogCount.incrementAndGet(); - intervalInLogCount.incrementAndGet(); - - if(! mQueue.offer(event)) { - lifeTimeDropCount.incrementAndGet(); - intervalDropCount.incrementAndGet(); - } - } - - private AuditEventBase dequeueEvent() throws InterruptedException { - AuditEventBase ret = mQueue.poll(); - - while(ret == null) { - logSummaryIfRequired(); - - if (mMaxFlushInterval > 0 ) { - long timeTillNextFlush = getTimeTillNextFlush(); - - if (timeTillNextFlush <= 0) { - break; // force flush - } - - ret = mQueue.poll(timeTillNextFlush, TimeUnit.MILLISECONDS); - } else { - // Let's wake up for summary logging - long waitTime = intervalLogDurationMS - (System.currentTimeMillis() - lastIntervalLogTime); - waitTime = waitTime <= 0 ? 
intervalLogDurationMS : waitTime; - - ret = mQueue.poll(waitTime, TimeUnit.MILLISECONDS); - } - } - - if(ret != null) { - lifeTimeOutLogCount.incrementAndGet(); - intervalOutLogCount.incrementAndGet(); - } - - logSummaryIfRequired(); - - return ret; - } - - private void logSummaryIfRequired() { - long intervalSinceLastLog = System.currentTimeMillis() - lastIntervalLogTime; - - if (intervalSinceLastLog > intervalLogDurationMS) { - if (intervalInLogCount.get() > 0 || intervalOutLogCount.get() > 0 ) { - long queueSize = mQueue.size(); - - LOG.info("AsyncAuditProvider-stats:" + mName + ": past " + formatIntervalForLog(intervalSinceLastLog) - + ": inLogs=" + intervalInLogCount.get() - + ", outLogs=" + intervalOutLogCount.get() - + ", dropped=" + intervalDropCount.get() - + ", currentQueueSize=" + queueSize); - - LOG.info("AsyncAuditProvider-stats:" + mName + ": process lifetime" - + ": inLogs=" + lifeTimeInLogCount.get() - + ", outLogs=" + lifeTimeOutLogCount.get() - + ", dropped=" + lifeTimeDropCount.get()); - } - - lastIntervalLogTime = System.currentTimeMillis(); - intervalInLogCount.set(0); - intervalOutLogCount.set(0); - intervalDropCount.set(0); - } - } - - private boolean isEmpty() { - return mQueue.isEmpty(); - } - - public void waitToComplete(long maxWaitSeconds) { - LOG.debug("==> AsyncAuditProvider.waitToComplete()"); - - try { - for (long waitTime = 0; !isEmpty() - && (maxWaitSeconds <= 0 || maxWaitSeconds > waitTime); waitTime += mWaitToCompleteLoopIntervalSecs) { - try { - LOG.info(String.format("%d messages yet to be flushed by %s. Sleeoping for %d sec", mQueue.size(), mName, mWaitToCompleteLoopIntervalSecs)); - Thread.sleep(mWaitToCompleteLoopIntervalSecs * 1000L); - } catch (InterruptedException excp) { - // someone really wants service to exit, abandon unwritten audits and exit. - LOG.warn("Caught interrupted exception! " + mQueue.size() + " messages still unflushed! 
Won't wait for queue to flush, exiting...", excp); - break; - } - } - } finally { - LOG.debug("<== AsyncAuditProvider.waitToComplete()"); - } - } - - private long getTimeTillNextFlush() { - long timeTillNextFlush = mMaxFlushInterval; - - if (mMaxFlushInterval > 0) { - - if (lastFlushTime != 0) { - long timeSinceLastFlush = System.currentTimeMillis() - - lastFlushTime; - - if (timeSinceLastFlush >= mMaxFlushInterval) { - timeTillNextFlush = 0; - } else { - timeTillNextFlush = mMaxFlushInterval - timeSinceLastFlush; - } - } - } - - return timeTillNextFlush; - } + return timeTillNextFlush; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java index 7957f2e10e..9d6894f75e 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java @@ -31,49 +31,60 @@ */ public class AuditFileCacheProvider extends BaseAuditHandler { - private static final Logger logger = LoggerFactory.getLogger(AuditFileCacheProvider.class); + private static final Logger logger = LoggerFactory.getLogger(AuditFileCacheProvider.class); - AuditFileCacheProviderSpool fileSpooler = null; - AuditHandler consumer = null; + AuditFileCacheProviderSpool fileSpooler; + AuditHandler consumer; AuditFileCacheProvider(AuditHandler consumer) { this.consumer = consumer; } + @Override + public boolean log(AuditEventBase event) { + boolean ret = false; + + if (event != null) { + fileSpooler.stashLogs(event); + + if (fileSpooler.isSpoolingSuccessful()) { + ret = true; + } + } + + return ret; + } + public void init(Properties prop, String basePropertyName) { String propPrefix = "xasecure.audit.filecache"; + if (basePropertyName != null) { propPrefix = basePropertyName; } + super.init(prop, propPrefix); + //init Consumer if (consumer != null) { 
consumer.init(prop, propPrefix); } + //init AuditFileCacheSpooler fileSpooler = new AuditFileCacheProviderSpool(consumer); - fileSpooler.init(prop,propPrefix); - } - @Override - public boolean log(AuditEventBase event) { - boolean ret = false; - if ( event != null) { - fileSpooler.stashLogs(event); - if ( fileSpooler.isSpoolingSuccessful()) { - ret = true; - } - } - return ret; + + fileSpooler.init(prop, propPrefix); } @Override public boolean log(Collection events) { boolean ret = true; - if ( events != null) { + + if (events != null) { for (AuditEventBase event : events) { ret = log(event); } } + return ret; } @@ -83,6 +94,7 @@ public void start() { if (consumer != null) { consumer.start(); } + if (fileSpooler != null) { // start AuditFileSpool thread fileSpooler.start(); @@ -91,7 +103,8 @@ public void start() { @Override public void stop() { - logger.info("Stop called. name=" + getName()); + logger.info("stop() called. name={}", getName()); + if (consumer != null) { consumer.stop(); } @@ -99,24 +112,27 @@ public void stop() { @Override public void waitToComplete() { - logger.info("waitToComplete called. name=" + getName()); - if ( consumer != null) { + logger.info("waitToComplete() called. name={}", getName()); + + if (consumer != null) { consumer.waitToComplete(); } } @Override public void waitToComplete(long timeout) { - logger.info("waitToComplete called. name=" + getName()); - if ( consumer != null) { + logger.info("waitToComplete(timeout={}) called. name={}", timeout, getName()); + + if (consumer != null) { consumer.waitToComplete(timeout); } } @Override public void flush() { - logger.info("waitToComplete. name=" + getName()); - if ( consumer != null) { + logger.info("flush() called. 
name={}", getName()); + + if (consumer != null) { consumer.flush(); } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java index dd02255fc9..8c51539d51 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,25 +18,33 @@ package org.apache.ranger.audit.provider; +import org.apache.ranger.audit.model.AuditEventBase; + import java.io.File; import java.util.Collection; import java.util.Properties; -import org.apache.ranger.audit.model.AuditEventBase; - public interface AuditHandler { - boolean log(AuditEventBase event); - boolean log(Collection events); + boolean log(AuditEventBase event); + + boolean log(Collection events); + + boolean logJSON(String event); + + boolean logJSON(Collection events); - boolean logJSON(String event); - boolean logJSON(Collection events); boolean logFile(File file); void init(Properties prop); + void init(Properties prop, String basePropertyName); + void start(); + void stop(); + void waitToComplete(); + void waitToComplete(long timeout); /** diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditMessageException.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditMessageException.java index 3ff04eee6a..bed7ed5074 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditMessageException.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditMessageException.java @@ -25,43 +25,40 @@ * a transient error */ public class AuditMessageException extends Exception { + private static final long serialVersionUID = 1L; - private static final long serialVersionUID = 1L; + public AuditMessageException() { + } - public AuditMessageException() { - } + /** + * @param message + */ + public AuditMessageException(String message) { + super(message); + } - /** - * @param message - */ - public AuditMessageException(String message) { - super(message); - } + /** + * @param cause + */ + public AuditMessageException(Throwable cause) { + super(cause); + } - /** - * 
@param cause - */ - public AuditMessageException(Throwable cause) { - super(cause); - } - - /** - * @param message - * @param cause - */ - public AuditMessageException(String message, Throwable cause) { - super(message, cause); - } - - /** - * @param message - * @param cause - * @param enableSuppression - * @param writableStackTrace - */ - public AuditMessageException(String message, Throwable cause, - boolean enableSuppression, boolean writableStackTrace) { - super(message, cause, enableSuppression, writableStackTrace); - } + /** + * @param message + * @param cause + */ + public AuditMessageException(String message, Throwable cause) { + super(message, cause); + } + /** + * @param message + * @param cause + * @param enableSuppression + * @param writableStackTrace + */ + public AuditMessageException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java index c10dd9ffaa..619c94c373 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java @@ -18,16 +18,13 @@ package org.apache.ranger.audit.provider; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.ranger.audit.destination.*; +import org.apache.ranger.audit.destination.AmazonCloudWatchAuditDestination; +import org.apache.ranger.audit.destination.ElasticSearchAuditDestination; +import org.apache.ranger.audit.destination.FileAuditDestination; 
+import org.apache.ranger.audit.destination.HDFSAuditDestination; +import org.apache.ranger.audit.destination.Log4JAuditDestination; +import org.apache.ranger.audit.destination.SolrAuditDestination; import org.apache.ranger.audit.provider.hdfs.HdfsAuditProvider; import org.apache.ranger.audit.provider.kafka.KafkaAuditProvider; import org.apache.ranger.audit.provider.solr.SolrAuditProvider; @@ -39,6 +36,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + /* * TODO: * 1) Flag to enable/disable audit logging @@ -47,511 +52,520 @@ */ public class AuditProviderFactory { - private static final Logger LOG = LoggerFactory - .getLogger(AuditProviderFactory.class); - - public static final String AUDIT_IS_ENABLED_PROP = "xasecure.audit.is.enabled"; - public static final String AUDIT_HDFS_IS_ENABLED_PROP = "xasecure.audit.hdfs.is.enabled"; - public static final String AUDIT_LOG4J_IS_ENABLED_PROP = "xasecure.audit.log4j.is.enabled"; - public static final String AUDIT_KAFKA_IS_ENABLED_PROP = "xasecure.audit.kafka.is.enabled"; - public static final String AUDIT_SOLR_IS_ENABLED_PROP = "xasecure.audit.solr.is.enabled"; - - public static final String AUDIT_DEST_BASE = "xasecure.audit.destination"; - public static final String AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC = "xasecure.audit.shutdown.hook.max.wait.seconds"; - public static final String AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP = "xasecure.audit.provider.filecache.is.enabled"; - public static final String FILE_QUEUE_TYPE = "filequeue"; - public static final String DEFAULT_QUEUE_TYPE = "memoryqueue"; - public static final int AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC_DEFAULT = 30; - - public static final int AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT = 10 * 1024; - public static final int 
AUDIT_ASYNC_MAX_FLUSH_INTERVAL_DEFAULT = 5 * 1000; - - private static final int RANGER_AUDIT_SHUTDOWN_HOOK_PRIORITY = 30; - - private volatile static AuditProviderFactory sFactory = null; - - private AuditHandler mProvider = null; - private String componentAppType = ""; - private boolean mInitDone = false; - private JVMShutdownHook jvmShutdownHook = null; - private ArrayList hbaseAppTypes = new ArrayList<>(Arrays.asList("hbaseMaster","hbaseRegional")); - - public AuditProviderFactory() { - LOG.info("AuditProviderFactory: creating.."); - - mProvider = getDefaultProvider(); - } - - public static AuditProviderFactory getInstance() { - AuditProviderFactory ret = sFactory; - if(ret == null) { - synchronized(AuditProviderFactory.class) { - ret = sFactory; - if(ret == null) { - ret = sFactory = new AuditProviderFactory(); - } - } - } - - return ret; - } - - public AuditHandler getAuditProvider() { - return mProvider; - } - - public boolean isInitDone() { - return mInitDone; - } - - /** - * call shutdown hook to provide a way to - * shutdown gracefully in addition to the ShutdownHook mechanism - */ - public void shutdown() { - if (isInitDone() && jvmShutdownHook != null) { - jvmShutdownHook.run(); - } - } - - public synchronized void init(Properties props, String appType) { - LOG.info("AuditProviderFactory: initializing.."); - - if (mInitDone) { - LOG.warn("AuditProviderFactory.init(): already initialized! 
Will try to re-initialize"); - } - mInitDone = true; - componentAppType = appType; - MiscUtil.setApplicationType(appType); - - boolean isEnabled = MiscUtil.getBooleanProperty(props, - AUDIT_IS_ENABLED_PROP, true); + private static final Logger LOG = LoggerFactory.getLogger(AuditProviderFactory.class); + + public static final String AUDIT_IS_ENABLED_PROP = "xasecure.audit.is.enabled"; + public static final String AUDIT_HDFS_IS_ENABLED_PROP = "xasecure.audit.hdfs.is.enabled"; + public static final String AUDIT_LOG4J_IS_ENABLED_PROP = "xasecure.audit.log4j.is.enabled"; + public static final String AUDIT_KAFKA_IS_ENABLED_PROP = "xasecure.audit.kafka.is.enabled"; + public static final String AUDIT_SOLR_IS_ENABLED_PROP = "xasecure.audit.solr.is.enabled"; + public static final String AUDIT_DEST_BASE = "xasecure.audit.destination"; + public static final String AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC = "xasecure.audit.shutdown.hook.max.wait.seconds"; + public static final String AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP = "xasecure.audit.provider.filecache.is.enabled"; + public static final String FILE_QUEUE_TYPE = "filequeue"; + public static final String DEFAULT_QUEUE_TYPE = "memoryqueue"; + public static final int AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC_DEFAULT = 30; + public static final int AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT = 10 * 1024; + public static final int AUDIT_ASYNC_MAX_FLUSH_INTERVAL_DEFAULT = 5 * 1000; + + private static final int RANGER_AUDIT_SHUTDOWN_HOOK_PRIORITY = 30; + + private static volatile AuditProviderFactory sFactory; + + private AuditHandler mProvider; + private String componentAppType = ""; + private boolean mInitDone; + private JVMShutdownHook jvmShutdownHook; + private final ArrayList hbaseAppTypes = new ArrayList<>(Arrays.asList("hbaseMaster", "hbaseRegional")); + + public AuditProviderFactory() { + LOG.info("AuditProviderFactory: creating.."); + + mProvider = getDefaultProvider(); + } + + public static AuditProviderFactory getInstance() { + 
AuditProviderFactory ret = sFactory; + + if (ret == null) { + synchronized (AuditProviderFactory.class) { + ret = sFactory; + + if (ret == null) { + ret = new AuditProviderFactory(); + + sFactory = ret; + } + } + } + + return ret; + } + + public AuditHandler getAuditProvider() { + return mProvider; + } + + public boolean isInitDone() { + return mInitDone; + } + + /** + * call shutdown hook to provide a way to + * shutdown gracefully in addition to the ShutdownHook mechanism + */ + public void shutdown() { + if (isInitDone() && jvmShutdownHook != null) { + jvmShutdownHook.run(); + } + } + + public synchronized void init(Properties props, String appType) { + LOG.info("AuditProviderFactory: initializing.."); + + if (mInitDone) { + LOG.warn("AuditProviderFactory.init(): already initialized! Will try to re-initialize"); + } + + mInitDone = true; + componentAppType = appType; + + MiscUtil.setApplicationType(appType); + + boolean isEnabled = MiscUtil.getBooleanProperty(props, AUDIT_IS_ENABLED_PROP, true); + if (!isEnabled) { LOG.info("AuditProviderFactory: Audit not enabled.."); + mProvider = getDefaultProvider(); + return; } - boolean isAuditToHdfsEnabled = MiscUtil.getBooleanProperty(props, - AUDIT_HDFS_IS_ENABLED_PROP, false); - boolean isAuditToLog4jEnabled = MiscUtil.getBooleanProperty(props, - AUDIT_LOG4J_IS_ENABLED_PROP, false); - boolean isAuditToKafkaEnabled = MiscUtil.getBooleanProperty(props, - AUDIT_KAFKA_IS_ENABLED_PROP, false); - boolean isAuditToSolrEnabled = MiscUtil.getBooleanProperty(props, - AUDIT_SOLR_IS_ENABLED_PROP, false); - - boolean isAuditFileCacheProviderEnabled = MiscUtil.getBooleanProperty(props, - AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP, false); - - List providers = new ArrayList(); - - for (Object propNameObj : props.keySet()) { - LOG.info("AUDIT PROPERTY: " + propNameObj.toString() + "=" - + props.getProperty(propNameObj.toString())); - } - - // Process new audit configurations - List destNameList = new ArrayList(); - - for (Object 
propNameObj : props.keySet()) { - String propName = propNameObj.toString(); - if (!propName.startsWith(AUDIT_DEST_BASE)) { - continue; - } - String destName = propName.substring(AUDIT_DEST_BASE.length() + 1); - List splits = MiscUtil.toArray(destName, "."); - if (splits.size() > 1) { - continue; - } - String value = props.getProperty(propName); - if (value.equalsIgnoreCase("enable") - || value.equalsIgnoreCase("enabled") - || value.equalsIgnoreCase("true")) { - destNameList.add(destName); - LOG.info("Audit destination " + propName + " is set to " - + value); - } - } - - for (String destName : destNameList) { - String destPropPrefix = AUDIT_DEST_BASE + "." + destName; - AuditHandler destProvider = getProviderFromConfig(props, - destPropPrefix, destName, null); - - if (destProvider != null) { - destProvider.init(props, destPropPrefix); - - String queueName = MiscUtil.getStringProperty(props, - destPropPrefix + "." + AuditQueue.PROP_QUEUE); - if (queueName == null || queueName.isEmpty()) { - LOG.info(destPropPrefix + "." + AuditQueue.PROP_QUEUE - + " is not set. Setting queue to batch for " - + destName); - queueName = "batch"; - } - LOG.info("queue for " + destName + " is " + queueName); - if (queueName != null && !queueName.isEmpty() - && !queueName.equalsIgnoreCase("none")) { - String queuePropPrefix = destPropPrefix + "." + queueName; - AuditHandler queueProvider = getProviderFromConfig(props, - queuePropPrefix, queueName, destProvider); - if (queueProvider != null) { - if (queueProvider instanceof AuditQueue) { - AuditQueue qProvider = (AuditQueue) queueProvider; - qProvider.init(props, queuePropPrefix); - providers.add(queueProvider); - } else { - LOG.error("Provider queue doesn't extend AuditQueue. Destination=" - + destName - + " can't be created. queueName=" - + queueName); - } - } else { - LOG.error("Queue provider for destination " + destName - + " can't be created. 
queueName=" + queueName); - } - } else { - LOG.info("Audit destination " + destProvider.getName() - + " added to provider list"); - providers.add(destProvider); - } - } - } - if (providers.size() > 0) { - LOG.info("Using v3 audit configuration"); - AuditHandler consumer = providers.get(0); - - // Possible pipeline is: - // async_queue -> summary_queue -> multidestination -> batch_queue - // -> hdfs_destination - // -> batch_queue -> solr_destination - // -> batch_queue -> kafka_destination - // Above, up to multidestination, the providers are same, then it - // branches out in parallel. - - // Set the providers in the reverse order e.g. - - if (providers.size() > 1) { - // If there are more than one destination, then we need multi - // destination to process it in parallel - LOG.info("MultiDestAuditProvider is used. Destination count=" - + providers.size()); - MultiDestAuditProvider multiDestProvider = new MultiDestAuditProvider(); - multiDestProvider.init(props); - multiDestProvider.addAuditProviders(providers); - consumer = multiDestProvider; - } - - // Let's see if Summary is enabled, then summarize before sending it - // downstream - String propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX; - boolean summaryEnabled = MiscUtil.getBooleanProperty(props, - propPrefix + "." + "summary" + "." + "enabled", false); - AuditSummaryQueue summaryQueue = null; - if (summaryEnabled) { - LOG.info("AuditSummaryQueue is enabled"); - summaryQueue = new AuditSummaryQueue(consumer); - summaryQueue.init(props, propPrefix); - consumer = summaryQueue; - } else { - LOG.info("AuditSummaryQueue is disabled"); - } - - if (!isAuditFileCacheProviderEnabled) { - // Create the AsysnQueue - AuditAsyncQueue asyncQueue = new AuditAsyncQueue(consumer); - propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX + "." 
+ "async"; - asyncQueue.init(props, propPrefix); - asyncQueue.setParentPath(componentAppType); - mProvider = asyncQueue; - LOG.info("Starting audit queue " + mProvider.getName()); - mProvider.start(); - } else { - // Assign AsyncQueue to AuditFileCacheProvider - AuditFileCacheProvider auditFileCacheProvider = new AuditFileCacheProvider(consumer); - propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX + "." + "filecache"; - auditFileCacheProvider.init(props, propPrefix); - auditFileCacheProvider.setParentPath(componentAppType); - mProvider = auditFileCacheProvider; - LOG.info("Starting Audit File Cache Provider " + mProvider.getName()); - mProvider.start(); - } - } else { - LOG.info("No v3 audit configuration found. Trying v2 audit configurations"); - if (!isEnabled - || !(isAuditToHdfsEnabled - || isAuditToKafkaEnabled || isAuditToLog4jEnabled - || isAuditToSolrEnabled || providers.size() == 0)) { - LOG.info("AuditProviderFactory: Audit not enabled.."); - - mProvider = getDefaultProvider(); - - return; - } - - if (isAuditToHdfsEnabled) { - LOG.info("HdfsAuditProvider is enabled"); - - HdfsAuditProvider hdfsProvider = new HdfsAuditProvider(); - - boolean isAuditToHdfsAsync = MiscUtil.getBooleanProperty(props, - HdfsAuditProvider.AUDIT_HDFS_IS_ASYNC_PROP, false); - - if (isAuditToHdfsAsync) { - int maxQueueSize = MiscUtil.getIntProperty(props, - HdfsAuditProvider.AUDIT_HDFS_MAX_QUEUE_SIZE_PROP, - AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT); - int maxFlushInterval = MiscUtil - .getIntProperty( - props, - HdfsAuditProvider.AUDIT_HDFS_MAX_FLUSH_INTERVAL_PROP, - AUDIT_ASYNC_MAX_FLUSH_INTERVAL_DEFAULT); - - AsyncAuditProvider asyncProvider = new AsyncAuditProvider( - "HdfsAuditProvider", maxQueueSize, - maxFlushInterval, hdfsProvider); - - providers.add(asyncProvider); - } else { - providers.add(hdfsProvider); - } - } - - if (isAuditToKafkaEnabled) { - LOG.info("KafkaAuditProvider is enabled"); - KafkaAuditProvider kafkaProvider = new KafkaAuditProvider(); - 
kafkaProvider.init(props); - - if (kafkaProvider.isAsync()) { - AsyncAuditProvider asyncProvider = new AsyncAuditProvider( - "MyKafkaAuditProvider", 1000, 1000, kafkaProvider); - providers.add(asyncProvider); - } else { - providers.add(kafkaProvider); - } - } - - if (isAuditToSolrEnabled) { - LOG.info("SolrAuditProvider is enabled"); - SolrAuditProvider solrProvider = new SolrAuditProvider(); - solrProvider.init(props); - - if (solrProvider.isAsync()) { - AsyncAuditProvider asyncProvider = new AsyncAuditProvider( - "MySolrAuditProvider", 1000, 1000, solrProvider); - providers.add(asyncProvider); - } else { - providers.add(solrProvider); - } - } - - if (isAuditToLog4jEnabled) { - Log4jAuditProvider log4jProvider = new Log4jAuditProvider(); - - boolean isAuditToLog4jAsync = MiscUtil.getBooleanProperty( - props, Log4jAuditProvider.AUDIT_LOG4J_IS_ASYNC_PROP, - false); - - if (isAuditToLog4jAsync) { - int maxQueueSize = MiscUtil.getIntProperty(props, - Log4jAuditProvider.AUDIT_LOG4J_MAX_QUEUE_SIZE_PROP, - AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT); - int maxFlushInterval = MiscUtil - .getIntProperty( - props, - Log4jAuditProvider.AUDIT_LOG4J_MAX_FLUSH_INTERVAL_PROP, - AUDIT_ASYNC_MAX_FLUSH_INTERVAL_DEFAULT); - - AsyncAuditProvider asyncProvider = new AsyncAuditProvider( - "Log4jAuditProvider", maxQueueSize, - maxFlushInterval, log4jProvider); - - providers.add(asyncProvider); - } else { - providers.add(log4jProvider); - } - } - if (providers.size() == 0) { - mProvider = getDefaultProvider(); - } else if (providers.size() == 1) { - mProvider = providers.get(0); - } else { - MultiDestAuditProvider multiDestProvider = new MultiDestAuditProvider(); - - multiDestProvider.addAuditProviders(providers); - - mProvider = multiDestProvider; - } - - mProvider.init(props); - mProvider.start(); - } - - installJvmShutdownHook(props); - } - - private AuditHandler getProviderFromConfig(Properties props, - String propPrefix, String providerName, AuditHandler consumer) { - AuditHandler provider 
= null; - String className = MiscUtil.getStringProperty(props, propPrefix + "." - + BaseAuditHandler.PROP_CLASS_NAME); - if (className != null && !className.isEmpty()) { - try { - Class handlerClass = Class.forName(className); - if (handlerClass.isAssignableFrom(AuditQueue.class)) { - // Queue class needs consumer - handlerClass.getDeclaredConstructor(AuditHandler.class) - .newInstance(consumer); - } else { - provider = (AuditHandler) Class.forName(className) - .newInstance(); - } - } catch (Exception e) { - LOG.error("Can't instantiate audit class for providerName=" - + providerName + ", className=" + className - + ", propertyPrefix=" + propPrefix, e); - } - } else { - if (providerName.equalsIgnoreCase("file")) { - provider = new FileAuditDestination(); - } else if (providerName.equalsIgnoreCase("hdfs")) { - provider = new HDFSAuditDestination(); - } else if (providerName.equalsIgnoreCase("solr")) { - provider = new SolrAuditDestination(); - } else if (providerName.equalsIgnoreCase("elasticsearch")) { - provider = new ElasticSearchAuditDestination(); - } else if (providerName.equalsIgnoreCase("amazon_cloudwatch")) { - provider = new AmazonCloudWatchAuditDestination(); - } else if (providerName.equalsIgnoreCase("kafka")) { - provider = new KafkaAuditProvider(); - } else if (providerName.equalsIgnoreCase("log4j")) { - provider = new Log4JAuditDestination(); - } else if (providerName.equalsIgnoreCase("batch")) { - provider = getAuditProvider(props, propPrefix, consumer); - } else if (providerName.equalsIgnoreCase("async")) { - provider = new AuditAsyncQueue(consumer); - } else { - LOG.error("Provider name doesn't have any class associated with it. providerName=" - + providerName + ", propertyPrefix=" + propPrefix); - } - } - if (provider != null && provider instanceof AuditQueue) { - if (consumer == null) { - LOG.error("consumer can't be null for AuditQueue. 
queue=" - + provider.getName() + ", propertyPrefix=" + propPrefix); - provider = null; - } - } - return provider; - } - - private AuditHandler getAuditProvider(Properties props, String propPrefix, AuditHandler consumer) { - AuditHandler ret = null; - String queueType = MiscUtil.getStringProperty(props, propPrefix + "." + "queuetype", DEFAULT_QUEUE_TYPE); - - if (LOG.isDebugEnabled()) { - LOG.debug("==> AuditProviderFactory.getAuditProvider() propPerfix= " + propPrefix + ", " + " queueType= " + queueType); - } - - if (FILE_QUEUE_TYPE.equalsIgnoreCase(queueType)) { - AuditFileQueue auditFileQueue = new AuditFileQueue(consumer); - String propPrefixFileQueue = propPrefix + "." + FILE_QUEUE_TYPE; - auditFileQueue.init(props, propPrefixFileQueue); - ret = new AuditBatchQueue(auditFileQueue); - } else { - ret = new AuditBatchQueue(consumer); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("<== AuditProviderFactory.getAuditProvider()"); - } - - return ret; - } - - private AuditHandler getDefaultProvider() { - return new DummyAuditProvider(); - } - - private void installJvmShutdownHook(Properties props) { - int shutdownHookMaxWaitSeconds = MiscUtil.getIntProperty(props, AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC, AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC_DEFAULT); - jvmShutdownHook = new JVMShutdownHook(mProvider, shutdownHookMaxWaitSeconds); - String appType = this.componentAppType; - if (appType != null && !hbaseAppTypes.contains(appType)) { - ShutdownHookManager.get().addShutdownHook(jvmShutdownHook, RANGER_AUDIT_SHUTDOWN_HOOK_PRIORITY); - } - } - - private static class RangerAsyncAuditCleanup implements Runnable { - - final Semaphore startCleanup; - final Semaphore doneCleanup; - final AuditHandler mProvider; - - RangerAsyncAuditCleanup(AuditHandler provider, Semaphore startCleanup, Semaphore doneCleanup) { - this.startCleanup = startCleanup; - this.doneCleanup = doneCleanup; - this.mProvider = provider; - } - - @Override - public void run() { - while (true) { - 
LOG.info("RangerAsyncAuditCleanup: Waiting to audit cleanup start signal"); - try { - startCleanup.acquire(); - } catch (InterruptedException e) { - LOG.error("RangerAsyncAuditCleanup: Interrupted while waiting for audit startCleanup signal! Exiting the thread...", e); - break; - } - LOG.info("RangerAsyncAuditCleanup: Starting cleanup"); - mProvider.waitToComplete(); - mProvider.stop(); - doneCleanup.release(); - LOG.info("RangerAsyncAuditCleanup: Done cleanup"); - } - } - } - - private static class JVMShutdownHook extends Thread { - final Semaphore startCleanup = new Semaphore(0); - final Semaphore doneCleanup = new Semaphore(0); - final Thread cleanupThread; - final int maxWait; - final AtomicBoolean done = new AtomicBoolean(false); - - public JVMShutdownHook(AuditHandler provider, int maxWait) { - this.maxWait = maxWait; - Runnable runnable = new RangerAsyncAuditCleanup(provider, startCleanup, doneCleanup); - cleanupThread = new Thread(runnable, "Ranger async Audit cleanup"); - cleanupThread.setDaemon(true); - cleanupThread.start(); - } - - public void run() { - if (!done.compareAndSet(false, true)) { - LOG.info("==> JVMShutdownHook.run() already done by another thread"); - return; - } - LOG.info("==> JVMShutdownHook.run()"); - LOG.info("JVMShutdownHook: Signalling async audit cleanup to start."); - startCleanup.release(); - try { - Long start = System.currentTimeMillis(); - LOG.info("JVMShutdownHook: Waiting up to " + maxWait + " seconds for audit cleanup to finish."); - boolean cleanupFinishedInTime = doneCleanup.tryAcquire(maxWait, TimeUnit.SECONDS); - if (cleanupFinishedInTime) { - LOG.info("JVMShutdownHook: Audit cleanup finished after " + (System.currentTimeMillis() - start) + " milli seconds"); - } else { - LOG.warn("JVMShutdownHook: could not detect finishing of audit cleanup even after waiting for " + maxWait + " seconds!"); - } - } catch (InterruptedException e) { - LOG.error("JVMShutdownHook: Interrupted while waiting for completion of Async 
executor!", e); - } - LOG.info("JVMShutdownHook: Interrupting ranger async audit cleanup thread"); - cleanupThread.interrupt(); - LOG.info("<== JVMShutdownHook.run()"); - } - } + boolean isAuditToHdfsEnabled = MiscUtil.getBooleanProperty(props, AUDIT_HDFS_IS_ENABLED_PROP, false); + boolean isAuditToLog4jEnabled = MiscUtil.getBooleanProperty(props, AUDIT_LOG4J_IS_ENABLED_PROP, false); + boolean isAuditToKafkaEnabled = MiscUtil.getBooleanProperty(props, AUDIT_KAFKA_IS_ENABLED_PROP, false); + boolean isAuditToSolrEnabled = MiscUtil.getBooleanProperty(props, AUDIT_SOLR_IS_ENABLED_PROP, false); + boolean isAuditFileCacheProviderEnabled = MiscUtil.getBooleanProperty(props, AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP, false); + + List providers = new ArrayList<>(); + + for (Object propNameObj : props.keySet()) { + LOG.info("AUDIT PROPERTY: {}={}", propNameObj, props.getProperty(propNameObj.toString())); + } + + // Process new audit configurations + List destNameList = new ArrayList<>(); + + for (Object propNameObj : props.keySet()) { + String propName = propNameObj.toString(); + + if (!propName.startsWith(AUDIT_DEST_BASE)) { + continue; + } + + String destName = propName.substring(AUDIT_DEST_BASE.length() + 1); + List splits = MiscUtil.toArray(destName, "."); + + if (splits.size() > 1) { + continue; + } + + String value = props.getProperty(propName); + + if (value.equalsIgnoreCase("enable") || value.equalsIgnoreCase("enabled") || value.equalsIgnoreCase("true")) { + destNameList.add(destName); + + LOG.info("Audit destination {} is set to {}", propName, value); + } + } + + for (String destName : destNameList) { + String destPropPrefix = AUDIT_DEST_BASE + "." + destName; + AuditHandler destProvider = getProviderFromConfig(props, destPropPrefix, destName, null); + + if (destProvider != null) { + destProvider.init(props, destPropPrefix); + + String queueName = MiscUtil.getStringProperty(props, destPropPrefix + "." 
+ AuditQueue.PROP_QUEUE); + + if (queueName == null || queueName.isEmpty()) { + LOG.info("{}.{} is not set. Setting queue to batch for {}", destPropPrefix, AuditQueue.PROP_QUEUE, destName); + + queueName = "batch"; + } + + LOG.info("queue for {} is {}", destName, queueName); + + if (queueName != null && !queueName.isEmpty() && !queueName.equalsIgnoreCase("none")) { + String queuePropPrefix = destPropPrefix + "." + queueName; + AuditHandler queueProvider = getProviderFromConfig(props, queuePropPrefix, queueName, destProvider); + + if (queueProvider != null) { + if (queueProvider instanceof AuditQueue) { + AuditQueue qProvider = (AuditQueue) queueProvider; + + qProvider.init(props, queuePropPrefix); + + providers.add(queueProvider); + } else { + LOG.error("Provider queue doesn't extend AuditQueue. Destination={} can't be created. queueName={}", destName, queueName); + } + } else { + LOG.error("Queue provider for destination {} can't be created. queueName={}", destName, queueName); + } + } else { + LOG.info("Audit destination {} added to provider list", destProvider.getName()); + + providers.add(destProvider); + } + } + } + if (!providers.isEmpty()) { + LOG.info("Using v3 audit configuration"); + + AuditHandler consumer = providers.get(0); + + // Possible pipeline is: + // async_queue -> summary_queue -> multidestination -> batch_queue + // -> hdfs_destination + // -> batch_queue -> solr_destination + // -> batch_queue -> kafka_destination + // Above, up to multidestination, the providers are same, then it + // branches out in parallel. + + // Set the providers in the reverse order e.g. + + if (providers.size() > 1) { + // If there are more than one destination, then we need multi destination to process it in parallel + LOG.info("MultiDestAuditProvider is used. 
Destination count={}", providers.size()); + + MultiDestAuditProvider multiDestProvider = new MultiDestAuditProvider(); + + multiDestProvider.init(props); + multiDestProvider.addAuditProviders(providers); + + consumer = multiDestProvider; + } + + // Let's see if Summary is enabled, then summarize before sending it downstream + String propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX; + boolean summaryEnabled = MiscUtil.getBooleanProperty(props, propPrefix + "." + "summary" + "." + "enabled", false); + + if (summaryEnabled) { + LOG.info("AuditSummaryQueue is enabled"); + + AuditSummaryQueue summaryQueue = new AuditSummaryQueue(consumer); + + summaryQueue.init(props, propPrefix); + + consumer = summaryQueue; + } else { + LOG.info("AuditSummaryQueue is disabled"); + } + + if (!isAuditFileCacheProviderEnabled) { + // Create the AsysnQueue + AuditAsyncQueue asyncQueue = new AuditAsyncQueue(consumer); + + propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX + "." + "async"; + + asyncQueue.init(props, propPrefix); + asyncQueue.setParentPath(componentAppType); + + mProvider = asyncQueue; + + LOG.info("Starting audit queue {}", mProvider.getName()); + + mProvider.start(); + } else { + // Assign AsyncQueue to AuditFileCacheProvider + AuditFileCacheProvider auditFileCacheProvider = new AuditFileCacheProvider(consumer); + + propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX + "." + "filecache"; + + auditFileCacheProvider.init(props, propPrefix); + auditFileCacheProvider.setParentPath(componentAppType); + + mProvider = auditFileCacheProvider; + + LOG.info("Starting Audit File Cache Provider {}", mProvider.getName()); + + mProvider.start(); + } + } else { + LOG.info("No v3 audit configuration found. 
Trying v2 audit configurations"); + + if (!isEnabled || !(isAuditToHdfsEnabled || isAuditToKafkaEnabled || isAuditToLog4jEnabled || isAuditToSolrEnabled || providers.isEmpty())) { + LOG.info("AuditProviderFactory: Audit not enabled.."); + + mProvider = getDefaultProvider(); + + return; + } + + if (isAuditToHdfsEnabled) { + LOG.info("HdfsAuditProvider is enabled"); + + HdfsAuditProvider hdfsProvider = new HdfsAuditProvider(); + + boolean isAuditToHdfsAsync = MiscUtil.getBooleanProperty(props, HdfsAuditProvider.AUDIT_HDFS_IS_ASYNC_PROP, false); + + if (isAuditToHdfsAsync) { + int maxQueueSize = MiscUtil.getIntProperty(props, HdfsAuditProvider.AUDIT_HDFS_MAX_QUEUE_SIZE_PROP, AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT); + int maxFlushInterval = MiscUtil.getIntProperty(props, HdfsAuditProvider.AUDIT_HDFS_MAX_FLUSH_INTERVAL_PROP, AUDIT_ASYNC_MAX_FLUSH_INTERVAL_DEFAULT); + + AsyncAuditProvider asyncProvider = new AsyncAuditProvider("HdfsAuditProvider", maxQueueSize, maxFlushInterval, hdfsProvider); + + providers.add(asyncProvider); + } else { + providers.add(hdfsProvider); + } + } + + if (isAuditToKafkaEnabled) { + LOG.info("KafkaAuditProvider is enabled"); + + KafkaAuditProvider kafkaProvider = new KafkaAuditProvider(); + + kafkaProvider.init(props); + + if (kafkaProvider.isAsync()) { + AsyncAuditProvider asyncProvider = new AsyncAuditProvider("MyKafkaAuditProvider", 1000, 1000, kafkaProvider); + + providers.add(asyncProvider); + } else { + providers.add(kafkaProvider); + } + } + + if (isAuditToSolrEnabled) { + LOG.info("SolrAuditProvider is enabled"); + + SolrAuditProvider solrProvider = new SolrAuditProvider(); + + solrProvider.init(props); + + if (solrProvider.isAsync()) { + AsyncAuditProvider asyncProvider = new AsyncAuditProvider("MySolrAuditProvider", 1000, 1000, solrProvider); + + providers.add(asyncProvider); + } else { + providers.add(solrProvider); + } + } + + if (isAuditToLog4jEnabled) { + Log4jAuditProvider log4jProvider = new Log4jAuditProvider(); + + boolean 
isAuditToLog4jAsync = MiscUtil.getBooleanProperty(props, Log4jAuditProvider.AUDIT_LOG4J_IS_ASYNC_PROP, false); + + if (isAuditToLog4jAsync) { + int maxQueueSize = MiscUtil.getIntProperty(props, Log4jAuditProvider.AUDIT_LOG4J_MAX_QUEUE_SIZE_PROP, AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT); + int maxFlushInterval = MiscUtil.getIntProperty(props, Log4jAuditProvider.AUDIT_LOG4J_MAX_FLUSH_INTERVAL_PROP, AUDIT_ASYNC_MAX_FLUSH_INTERVAL_DEFAULT); + + AsyncAuditProvider asyncProvider = new AsyncAuditProvider("Log4jAuditProvider", maxQueueSize, maxFlushInterval, log4jProvider); + + providers.add(asyncProvider); + } else { + providers.add(log4jProvider); + } + } + if (providers.isEmpty()) { + mProvider = getDefaultProvider(); + } else if (providers.size() == 1) { + mProvider = providers.get(0); + } else { + MultiDestAuditProvider multiDestProvider = new MultiDestAuditProvider(); + + multiDestProvider.addAuditProviders(providers); + + mProvider = multiDestProvider; + } + + mProvider.init(props); + mProvider.start(); + } + + installJvmShutdownHook(props); + } + + private AuditHandler getProviderFromConfig(Properties props, String propPrefix, String providerName, AuditHandler consumer) { + AuditHandler provider = null; + String className = MiscUtil.getStringProperty(props, propPrefix + "." 
+ BaseAuditHandler.PROP_CLASS_NAME); + + if (className != null && !className.isEmpty()) { + try { + Class handlerClass = Class.forName(className); + + if (handlerClass.isAssignableFrom(AuditQueue.class)) { + // Queue class needs consumer + handlerClass.getDeclaredConstructor(AuditHandler.class).newInstance(consumer); + } else { + provider = (AuditHandler) Class.forName(className).newInstance(); + } + } catch (Exception e) { + LOG.error("Can't instantiate audit class for providerName={}, className={}, propertyPrefix={}", providerName, className, propPrefix, e); + } + } else { + if (providerName.equalsIgnoreCase("file")) { + provider = new FileAuditDestination(); + } else if (providerName.equalsIgnoreCase("hdfs")) { + provider = new HDFSAuditDestination(); + } else if (providerName.equalsIgnoreCase("solr")) { + provider = new SolrAuditDestination(); + } else if (providerName.equalsIgnoreCase("elasticsearch")) { + provider = new ElasticSearchAuditDestination(); + } else if (providerName.equalsIgnoreCase("amazon_cloudwatch")) { + provider = new AmazonCloudWatchAuditDestination(); + } else if (providerName.equalsIgnoreCase("kafka")) { + provider = new KafkaAuditProvider(); + } else if (providerName.equalsIgnoreCase("log4j")) { + provider = new Log4JAuditDestination(); + } else if (providerName.equalsIgnoreCase("batch")) { + provider = getAuditProvider(props, propPrefix, consumer); + } else if (providerName.equalsIgnoreCase("async")) { + provider = new AuditAsyncQueue(consumer); + } else { + LOG.error("Provider name doesn't have any class associated with it. providerName={}, propertyPrefix={}", providerName, propPrefix); + } + } + + if (provider instanceof AuditQueue) { + if (consumer == null) { + LOG.error("consumer can't be null for AuditQueue. 
queue={}, propertyPrefix={}", provider.getName(), propPrefix); + + provider = null; + } + } + + return provider; + } + + private AuditHandler getAuditProvider(Properties props, String propPrefix, AuditHandler consumer) { + AuditHandler ret; + String queueType = MiscUtil.getStringProperty(props, propPrefix + "." + "queuetype", DEFAULT_QUEUE_TYPE); + + LOG.debug("==> AuditProviderFactory.getAuditProvider() propPerfix={}, queueType={}", propPrefix, queueType); + + if (FILE_QUEUE_TYPE.equalsIgnoreCase(queueType)) { + AuditFileQueue auditFileQueue = new AuditFileQueue(consumer); + String propPrefixFileQueue = propPrefix + "." + FILE_QUEUE_TYPE; + + auditFileQueue.init(props, propPrefixFileQueue); + + ret = new AuditBatchQueue(auditFileQueue); + } else { + ret = new AuditBatchQueue(consumer); + } + + LOG.debug("<== AuditProviderFactory.getAuditProvider()"); + + return ret; + } + + private AuditHandler getDefaultProvider() { + return new DummyAuditProvider(); + } + + private void installJvmShutdownHook(Properties props) { + int shutdownHookMaxWaitSeconds = MiscUtil.getIntProperty(props, AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC, AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC_DEFAULT); + + jvmShutdownHook = new JVMShutdownHook(mProvider, shutdownHookMaxWaitSeconds); + + String appType = this.componentAppType; + + if (appType != null && !hbaseAppTypes.contains(appType)) { + ShutdownHookManager.get().addShutdownHook(jvmShutdownHook, RANGER_AUDIT_SHUTDOWN_HOOK_PRIORITY); + } + } + + private static class RangerAsyncAuditCleanup implements Runnable { + final Semaphore startCleanup; + final Semaphore doneCleanup; + final AuditHandler mProvider; + + RangerAsyncAuditCleanup(AuditHandler provider, Semaphore startCleanup, Semaphore doneCleanup) { + this.startCleanup = startCleanup; + this.doneCleanup = doneCleanup; + this.mProvider = provider; + } + + @Override + public void run() { + while (true) { + LOG.info("RangerAsyncAuditCleanup: Waiting to audit cleanup start signal"); + + try { + 
startCleanup.acquire(); + } catch (InterruptedException e) { + LOG.error("RangerAsyncAuditCleanup: Interrupted while waiting for audit startCleanup signal! Exiting the thread...", e); + + break; + } + + LOG.info("RangerAsyncAuditCleanup: Starting cleanup"); + + mProvider.waitToComplete(); + mProvider.stop(); + doneCleanup.release(); + + LOG.info("RangerAsyncAuditCleanup: Done cleanup"); + } + } + } + + private static class JVMShutdownHook extends Thread { + final Semaphore startCleanup = new Semaphore(0); + final Semaphore doneCleanup = new Semaphore(0); + final AtomicBoolean done = new AtomicBoolean(false); + final Thread cleanupThread; + final int maxWait; + + public JVMShutdownHook(AuditHandler provider, int maxWait) { + this.maxWait = maxWait; + + Runnable runnable = new RangerAsyncAuditCleanup(provider, startCleanup, doneCleanup); + + cleanupThread = new Thread(runnable, "Ranger async Audit cleanup"); + + cleanupThread.setDaemon(true); + cleanupThread.start(); + } + + public void run() { + if (!done.compareAndSet(false, true)) { + LOG.info("==> JVMShutdownHook.run() already done by another thread"); + + return; + } + + LOG.info("==> JVMShutdownHook.run()"); + LOG.info("JVMShutdownHook: Signalling async audit cleanup to start."); + + startCleanup.release(); + + try { + long start = System.currentTimeMillis(); + + LOG.info("JVMShutdownHook: Waiting up to {} seconds for audit cleanup to finish.", maxWait); + + boolean cleanupFinishedInTime = doneCleanup.tryAcquire(maxWait, TimeUnit.SECONDS); + + if (cleanupFinishedInTime) { + LOG.info("JVMShutdownHook: Audit cleanup finished after {} milli seconds", (System.currentTimeMillis() - start)); + } else { + LOG.warn("JVMShutdownHook: could not detect finishing of audit cleanup even after waiting for {} seconds!", maxWait); + } + } catch (InterruptedException e) { + LOG.error("JVMShutdownHook: Interrupted while waiting for completion of Async executor!", e); + } + + LOG.info("JVMShutdownHook: Interrupting ranger async 
audit cleanup thread"); + + cleanupThread.interrupt(); + + LOG.info("<== JVMShutdownHook.run()"); + } + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditWriterFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditWriterFactory.java index 38844c95d3..46e9ef85e8 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditWriterFactory.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditWriterFactory.java @@ -28,74 +28,73 @@ import java.util.Properties; public class AuditWriterFactory { - private static final Logger logger = LoggerFactory.getLogger(AuditWriterFactory.class); - public static final String AUDIT_FILETYPE_DEFAULT = "json"; - public static final String AUDIT_JSON_FILEWRITER_IMPL = "org.apache.ranger.audit.utils.RangerJSONAuditWriter"; - public static final String AUDIT_ORC_FILEWRITER_IMPL = "org.apache.ranger.audit.utils.RangerORCAuditWriter"; - - public Map auditConfigs = null; - public Properties props = null; - public String propPrefix = null; - public String auditProviderName = null; - public RangerAuditWriter auditWriter = null; - private static volatile AuditWriterFactory me = null; + private static final Logger logger = LoggerFactory.getLogger(AuditWriterFactory.class); + + public static final String AUDIT_FILETYPE_DEFAULT = "json"; + public static final String AUDIT_JSON_FILEWRITER_IMPL = "org.apache.ranger.audit.utils.RangerJSONAuditWriter"; + public static final String AUDIT_ORC_FILEWRITER_IMPL = "org.apache.ranger.audit.utils.RangerORCAuditWriter"; + + private static volatile AuditWriterFactory me; + + public Map auditConfigs; + public Properties props; + public String propPrefix; + public String auditProviderName; + public RangerAuditWriter auditWriter; public static AuditWriterFactory getInstance() { AuditWriterFactory auditWriter = me; + if (auditWriter == null) { synchronized (AuditWriterFactory.class) { auditWriter = me; + if (auditWriter == 
null) { - me = auditWriter = new AuditWriterFactory(); + auditWriter = new AuditWriterFactory(); + me = auditWriter; } } } + return auditWriter; } - public void init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) throws Exception { - if (logger.isDebugEnabled()) { - logger.debug("==> AuditWriterFactory.init()"); - } + public void init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) throws Exception { + logger.debug("==> AuditWriterFactory.init()"); + this.props = props; this.propPrefix = propPrefix; this.auditProviderName = auditProviderName; this.auditConfigs = auditConfigs; - String auditFileType = MiscUtil.getStringProperty(props, propPrefix + ".batch.filequeue.filetype", AUDIT_FILETYPE_DEFAULT); - String writerClass = MiscUtil.getStringProperty(props, propPrefix + ".filewriter.impl"); + + String auditFileType = MiscUtil.getStringProperty(props, propPrefix + ".batch.filequeue.filetype", AUDIT_FILETYPE_DEFAULT); + String writerClass = MiscUtil.getStringProperty(props, propPrefix + ".filewriter.impl"); auditWriter = StringUtils.isEmpty(writerClass) ? 
createWriter(getDefaultWriter(auditFileType)) : createWriter(writerClass); if (auditWriter != null) { auditWriter.init(props, propPrefix, auditProviderName, auditConfigs); - if (logger.isDebugEnabled()) { - logger.debug("<== AuditWriterFactory.init() :" + auditWriter.getClass().getName()); - } + + logger.debug("<== AuditWriterFactory.init() :{}", auditWriter.getClass().getName()); } } - public RangerAuditWriter createWriter(String writerClass) throws Exception { - if (logger.isDebugEnabled()) { - logger.debug("==> AuditWriterFactory.createWriter()"); - } - RangerAuditWriter ret = null; - try { - Class cls = (Class) Class.forName(writerClass); - ret = cls.newInstance(); - } catch (Exception e) { - throw e; - } - if (logger.isDebugEnabled()) { - logger.debug("<== AuditWriterFactory.createWriter()"); - } + public RangerAuditWriter createWriter(String writerClass) throws Exception { + logger.debug("==> AuditWriterFactory.createWriter()"); + + Class cls = (Class) Class.forName(writerClass); + RangerAuditWriter ret = cls.newInstance(); + + logger.debug("<== AuditWriterFactory.createWriter()"); + return ret; } public String getDefaultWriter(String auditFileType) { - if (logger.isDebugEnabled()) { - logger.debug("==> AuditWriterFactory.getDefaultWriter()"); - } - String ret = null; + logger.debug("==> AuditWriterFactory.getDefaultWriter()"); + + final String ret; + switch (auditFileType) { case "orc": ret = AUDIT_ORC_FILEWRITER_IMPL; @@ -103,14 +102,17 @@ public String getDefaultWriter(String auditFileType) { case "json": ret = AUDIT_JSON_FILEWRITER_IMPL; break; + default: + ret = null; + break; } - if (logger.isDebugEnabled()) { - logger.debug("<== AuditWriterFactory.getDefaultWriter() :" + ret); - } + + logger.debug("<== AuditWriterFactory.getDefaultWriter() :{}", ret); + return ret; } - public RangerAuditWriter getAuditWriter(){ + public RangerAuditWriter getAuditWriter() { return this.auditWriter; } -} \ No newline at end of file +} diff --git 
a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java index 94c6d754b0..32ea8b9c3a 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java @@ -23,472 +23,481 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.util.*; -import java.util.concurrent.atomic.AtomicLong; - import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; +import java.io.File; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicLong; + public abstract class BaseAuditHandler implements AuditHandler { - private static final Logger LOG = LoggerFactory.getLogger(BaseAuditHandler.class); - - static final String AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP = "xasecure.audit.log.failure.report.min.interval.ms"; - - static final String AUDIT_LOG_STATUS_LOG_ENABLED = "xasecure.audit.log.status.log.enabled"; - static final String AUDIT_LOG_STATUS_LOG_INTERVAL_SEC = "xasecure.audit.log.status.log.interval.sec"; - static final boolean DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED = false; - static final long DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC = 5L * 60; // 5 minutes - - public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE = "xasecure.policymgr.clientssl.keystore"; - public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE = "xasecure.policymgr.clientssl.keystore.type"; - public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL = "xasecure.policymgr.clientssl.keystore.credential.file"; - public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL_ALIAS = "sslKeyStore"; - public static final 
String RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE_DEFAULT = "jks"; - - public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE = "xasecure.policymgr.clientssl.truststore"; - public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE = "xasecure.policymgr.clientssl.truststore.type"; - public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL = "xasecure.policymgr.clientssl.truststore.credential.file"; - public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL_ALIAS = "sslTrustStore"; - public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE_DEFAULT = "jks"; - - public static final String RANGER_SSL_KEYMANAGER_ALGO_TYPE = KeyManagerFactory.getDefaultAlgorithm(); - public static final String RANGER_SSL_TRUSTMANAGER_ALGO_TYPE = TrustManagerFactory.getDefaultAlgorithm(); - public static final String RANGER_SSL_CONTEXT_ALGO_TYPE = "TLSv1.2"; - - public static final String PROP_CONFIG = "config"; - public static final String FAILED_TO_LOG_AUDIT_EVENT = "failed to log audit event: {}"; - private int mLogFailureReportMinIntervalInMs = 60 * 1000; - - private AtomicLong mFailedLogLastReportTime = new AtomicLong(0); - private AtomicLong mFailedLogCountSinceLastReport = new AtomicLong(0); - private AtomicLong mFailedLogCountLifeTime = new AtomicLong(0); - - public static final String PROP_NAME = "name"; - public static final String PROP_CLASS_NAME = "classname"; - - public static final String PROP_DEFAULT_PREFIX = "xasecure.audit.provider"; - - protected String propPrefix = PROP_DEFAULT_PREFIX; - - protected String providerName = null; - protected String parentPath = null; - - protected int failedRetryTimes = 3; - protected int failedRetrySleep = 3 * 1000; - - int errorLogIntervalMS = 30 * 1000; // Every 30 seconds - long lastErrorLogMS = 0; - - long totalCount = 0; - long totalSuccessCount = 0; - long totalFailedCount = 0; - long totalStashedCount = 0; - long totalDeferredCount = 0; - - long lastIntervalCount = 0; - long 
lastIntervalSuccessCount = 0; - long lastIntervalFailedCount = 0; - long lastStashedCount = 0; - long lastDeferredCount = 0; - - boolean statusLogEnabled = DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED; - long statusLogIntervalMS = DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC * 1000; - long lastStatusLogTime = System.currentTimeMillis(); - long nextStatusLogTime = lastStatusLogTime + statusLogIntervalMS; - - protected Properties props = null; - protected Map configProps = new HashMap<>(); - - @Override - public void init(Properties props) { - init(props, null); - } - - @Override - public void init(Properties props, String basePropertyName) { - LOG.info("BaseAuditProvider.init()"); - this.props = props; - if (basePropertyName != null) { - propPrefix = basePropertyName; - } - LOG.info("propPrefix=" + propPrefix); - - String name = MiscUtil.getStringProperty(props, basePropertyName + "." - + PROP_NAME); - if (name != null && !name.isEmpty()) { - setName(name); - } - // Get final token - if (providerName == null) { - List tokens = MiscUtil.toArray(propPrefix, "."); - if (!tokens.isEmpty()) { - String finalToken = tokens.get(tokens.size() - 1); - setName(finalToken); - LOG.info("Using providerName from property prefix. 
providerName=" - + getName()); - } - } - LOG.info("providerName=" + getName()); - - mLogFailureReportMinIntervalInMs = MiscUtil.getIntProperty(props, - AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP, 60 * 1000); - - boolean globalStatusLogEnabled = MiscUtil.getBooleanProperty(props, AUDIT_LOG_STATUS_LOG_ENABLED, DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED); - long globalStatusLogIntervalSec = MiscUtil.getLongProperty(props, AUDIT_LOG_STATUS_LOG_INTERVAL_SEC, DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC); - - statusLogEnabled = MiscUtil.getBooleanProperty(props, basePropertyName + ".status.log.enabled", globalStatusLogEnabled); - statusLogIntervalMS = MiscUtil.getLongProperty(props, basePropertyName + ".status.log.interval.sec", globalStatusLogIntervalSec) * 1000; - - nextStatusLogTime = lastStatusLogTime + statusLogIntervalMS; - - LOG.info(AUDIT_LOG_STATUS_LOG_ENABLED + "=" + globalStatusLogEnabled); - LOG.info(AUDIT_LOG_STATUS_LOG_INTERVAL_SEC + "=" + globalStatusLogIntervalSec); - LOG.info(basePropertyName + ".status.log.enabled=" + statusLogEnabled); - LOG.info(basePropertyName + ".status.log.interval.sec=" + (statusLogIntervalMS / 1000)); - - String configPropsNamePrefix = propPrefix + "." + PROP_CONFIG + "."; - for (Object propNameObj : props.keySet()) { - String propName = propNameObj.toString(); - - if (!propName.startsWith(configPropsNamePrefix)) { - continue; - } - String configName = propName.substring(configPropsNamePrefix.length()); - String configValue = props.getProperty(propName); - configProps.put(configName, configValue); - LOG.info("Found Config property: " + configName + " => " + configValue); - } - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. 
- * audit.model.AuditEventBase) - */ - @Override - public boolean log(AuditEventBase event) { - return log(Collections.singletonList(event)); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.lang.String) - */ - @Override - public boolean logJSON(String event) { - AuditEventBase eventObj = MiscUtil.fromJson(event, - AuthzAuditEvent.class); - return log(eventObj); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.util.Collection - * ) - */ - @Override - public boolean logJSON(Collection events) { - List eventList = new ArrayList<>(events.size()); - for (String event : events) { - eventList.add(MiscUtil.fromJson(event, AuthzAuditEvent.class)); - } - return log(eventList); - } - - @Override - public boolean logFile(File file) { - return logFile(file); - } - - public String getParentPath() { - return parentPath; - } - - public void setParentPath(String parentPath) { - this.parentPath = parentPath; - } - - public String getFinalPath() { - return getName(); - } - - public void setName(String name) { - providerName = name; - } - - @Override - public String getName() { - if (parentPath != null) { - return parentPath + "." 
+ providerName; - } - return providerName; - } - - public long addTotalCount(int count) { - totalCount += count; - return totalCount; - } - - public long addSuccessCount(int count) { - totalSuccessCount += count; - return totalSuccessCount; - } - - public long addFailedCount(int count) { - totalFailedCount += count; - return totalFailedCount; - } - - public long addStashedCount(int count) { - totalStashedCount += count; - return totalStashedCount; - } - - public long addDeferredCount(int count) { - totalDeferredCount += count; - return totalDeferredCount; - } - - public long getTotalCount() { - return totalCount; - } - - public long getTotalSuccessCount() { - return totalSuccessCount; - } - - public long getTotalFailedCount() { - return totalFailedCount; - } - - public long getTotalStashedCount() { - return totalStashedCount; - } - - public long getLastStashedCount() { - return lastStashedCount; - } - - public long getTotalDeferredCount() { - return totalDeferredCount; - } - - public long getLastDeferredCount() { - return lastDeferredCount; - } - - public boolean isStatusLogEnabled() { return statusLogEnabled; } - - public void logStatusIfRequired() { - if (System.currentTimeMillis() > nextStatusLogTime) { - logStatus(); - } - } - - public void logStatus() { - try { - long currTime = System.currentTimeMillis(); - long diffTime = currTime - lastStatusLogTime; - - lastStatusLogTime = currTime; - nextStatusLogTime = currTime + statusLogIntervalMS; - - long diffCount = totalCount - lastIntervalCount; - long diffSuccess = totalSuccessCount - lastIntervalSuccessCount; - long diffFailed = totalFailedCount - lastIntervalFailedCount; - long diffStashed = totalStashedCount - lastStashedCount; - long diffDeferred = totalDeferredCount - lastDeferredCount; - - if (diffCount == 0 && diffSuccess == 0 && diffFailed == 0 - && diffStashed == 0 && diffDeferred == 0) { - return; - } - - lastIntervalCount = totalCount; - lastIntervalSuccessCount = totalSuccessCount; - 
lastIntervalFailedCount = totalFailedCount; - lastStashedCount = totalStashedCount; - lastDeferredCount = totalDeferredCount; - - if (statusLogEnabled) { - String finalPath = ""; - String tFinalPath = getFinalPath(); - if (!getName().equals(tFinalPath)) { - finalPath = ", finalDestination=" + tFinalPath; - } - - logAuditStatus(diffTime, diffCount, diffSuccess, diffFailed, diffStashed, diffDeferred, finalPath); - } - } catch (Exception t) { - LOG.error("Error while printing stats. auditProvider=" + getName()); - } - } - private void logAuditStatus(long diffTime, long diffCount, long diffSuccess, long diffFailed, long diffStashed, long diffDeferred, String finalPath) { - String msg = "Audit Status Log: name=" - + getName() - + finalPath - + ", interval=" - + formatIntervalForLog(diffTime) - + ", events=" - + diffCount - + (diffSuccess > 0 ? (", succcessCount=" + diffSuccess) - : "") - + (diffFailed > 0 ? (", failedCount=" + diffFailed) : "") - + (diffStashed > 0 ? (", stashedCount=" + diffStashed) : "") - + (diffDeferred > 0 ? (", deferredCount=" + diffDeferred) - : "") - + ", totalEvents=" - + totalCount - + (totalSuccessCount > 0 ? (", totalSuccessCount=" + totalSuccessCount) - : "") - + (totalFailedCount > 0 ? (", totalFailedCount=" + totalFailedCount) - : "") - + (totalStashedCount > 0 ? (", totalStashedCount=" + totalStashedCount) - : "") - + (totalDeferredCount > 0 ? 
(", totalDeferredCount=" + totalDeferredCount) - : ""); - LOG.info(msg); - } - - public void logError(String msg, Object arg) { - long currTimeMS = System.currentTimeMillis(); - if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { - LOG.error(msg, arg); - lastErrorLogMS = currTimeMS; - } - } - - public void logError(String msg, Throwable ex) { - long currTimeMS = System.currentTimeMillis(); - if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { - LOG.error(msg, ex); - lastErrorLogMS = currTimeMS; - } - } - - public String getTimeDiffStr(long time1, long time2) { - long timeInMs = Math.abs(time1 - time2); - return formatIntervalForLog(timeInMs); - } - - public String formatIntervalForLog(long timeInMs) { - long hours = timeInMs / (60 * 60 * 1000); - long minutes = (timeInMs / (60 * 1000)) % 60; - long seconds = (timeInMs % (60 * 1000)) / 1000; - long mSeconds = (timeInMs % (1000)); - - if (hours > 0) - return String.format("%02d:%02d:%02d.%03d hours", hours, minutes, - seconds, mSeconds); - else if (minutes > 0) - return String.format("%02d:%02d.%03d minutes", minutes, seconds, - mSeconds); - else if (seconds > 0) - return String.format("%02d.%03d seconds", seconds, mSeconds); - else - return String.format("%03d milli-seconds", mSeconds); - } - - public void logFailedEvent(AuditEventBase event) { - logFailedEvent(event, ""); - } - - public void logFailedEvent(AuditEventBase event, Throwable excp) { - long now = System.currentTimeMillis(); - - long timeSinceLastReport = now - mFailedLogLastReportTime.get(); - long countSinceLastReport = mFailedLogCountSinceLastReport - .incrementAndGet(); - long countLifeTime = mFailedLogCountLifeTime.incrementAndGet(); - - if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) { - mFailedLogLastReportTime.set(now); - mFailedLogCountSinceLastReport.set(0); - - if (excp != null) { - LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, MiscUtil.stringify(event), excp); - } else { - LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, 
MiscUtil.stringify(event)); - } - - if (countLifeTime > 1) { // no stats to print for the 1st failure - LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime); - } - } - } - - public void logFailedEvent(Collection events) { - logFailedEvent(events, ""); - } - - public void logFailedEvent(Collection events, Throwable excp) { - for (AuditEventBase event : events) { - logFailedEvent(event, excp); - } - } - - public void logFailedEvent(AuditEventBase event, String message) { - long now = System.currentTimeMillis(); - - long timeSinceLastReport = now - mFailedLogLastReportTime.get(); - long countSinceLastReport = mFailedLogCountSinceLastReport - .incrementAndGet(); - long countLifeTime = mFailedLogCountLifeTime.incrementAndGet(); - - if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) { - mFailedLogLastReportTime.set(now); - mFailedLogCountSinceLastReport.set(0); - - LOG.warn("failed to log audit event: {} , errorMessage={}", MiscUtil.stringify(event), message); - - if (countLifeTime > 1) { // no stats to print for the 1st failure - LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime); - } - } - } - - public void logFailedEvent(Collection events, - String errorMessage) { - for (AuditEventBase event : events) { - logFailedEvent(event, errorMessage); - } - } - - public void logFailedEventJSON(String event, Throwable excp) { - long now = System.currentTimeMillis(); - - long timeSinceLastReport = now - mFailedLogLastReportTime.get(); - long countSinceLastReport = mFailedLogCountSinceLastReport - .incrementAndGet(); - long countLifeTime = mFailedLogCountLifeTime.incrementAndGet(); - - if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) { - mFailedLogLastReportTime.set(now); - mFailedLogCountSinceLastReport.set(0); - - if (excp != null) { - 
LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, event, excp); - } else { - LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, event); - } - - if (countLifeTime > 1) { // no stats to print for the 1st failure - LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime); - } - } - } - - public void logFailedEventJSON(Collection events, Throwable excp) { - for (String event : events) { - logFailedEventJSON(event, excp); - } - } + private static final Logger LOG = LoggerFactory.getLogger(BaseAuditHandler.class); + + public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE = "xasecure.policymgr.clientssl.keystore"; + public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE = "xasecure.policymgr.clientssl.keystore.type"; + public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL = "xasecure.policymgr.clientssl.keystore.credential.file"; + public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_CREDENTIAL_ALIAS = "sslKeyStore"; + public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE_DEFAULT = "jks"; + public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE = "xasecure.policymgr.clientssl.truststore"; + public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE = "xasecure.policymgr.clientssl.truststore.type"; + public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL = "xasecure.policymgr.clientssl.truststore.credential.file"; + public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_CREDENTIAL_ALIAS = "sslTrustStore"; + public static final String RANGER_POLICYMGR_TRUSTSTORE_FILE_TYPE_DEFAULT = "jks"; + public static final String RANGER_SSL_KEYMANAGER_ALGO_TYPE = KeyManagerFactory.getDefaultAlgorithm(); + public static final String RANGER_SSL_TRUSTMANAGER_ALGO_TYPE = TrustManagerFactory.getDefaultAlgorithm(); + public static final String RANGER_SSL_CONTEXT_ALGO_TYPE = "TLSv1.2"; + public static final String PROP_CONFIG = "config"; + public 
static final String FAILED_TO_LOG_AUDIT_EVENT = "failed to log audit event: {}"; + public static final String PROP_NAME = "name"; + public static final String PROP_CLASS_NAME = "classname"; + public static final String PROP_DEFAULT_PREFIX = "xasecure.audit.provider"; + + static final String AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP = "xasecure.audit.log.failure.report.min.interval.ms"; + static final String AUDIT_LOG_STATUS_LOG_ENABLED = "xasecure.audit.log.status.log.enabled"; + static final String AUDIT_LOG_STATUS_LOG_INTERVAL_SEC = "xasecure.audit.log.status.log.interval.sec"; + static final boolean DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED = false; + static final long DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC = 5L * 60; // 5 minutes + + protected String propPrefix = PROP_DEFAULT_PREFIX; + protected String providerName; + protected String parentPath; + protected int failedRetryTimes = 3; + protected int failedRetrySleep = 3 * 1000; + protected Map configProps = new HashMap<>(); + protected Properties props; + + int errorLogIntervalMS = 30 * 1000; // Every 30 seconds + long lastErrorLogMS; + long totalCount; + long totalSuccessCount; + long totalFailedCount; + long totalStashedCount; + long totalDeferredCount; + long lastIntervalCount; + long lastIntervalSuccessCount; + long lastIntervalFailedCount; + long lastStashedCount; + long lastDeferredCount; + boolean statusLogEnabled = DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED; + long statusLogIntervalMS = DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC * 1000; + long lastStatusLogTime = System.currentTimeMillis(); + long nextStatusLogTime = lastStatusLogTime + statusLogIntervalMS; + + private int mLogFailureReportMinIntervalInMs = 60 * 1000; + private final AtomicLong mFailedLogLastReportTime = new AtomicLong(0); + private final AtomicLong mFailedLogCountSinceLastReport = new AtomicLong(0); + private final AtomicLong mFailedLogCountLifeTime = new AtomicLong(0); + + /* + * (non-Javadoc) + * + * @see + * 
org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. + * audit.model.AuditEventBase) + */ + @Override + public boolean log(AuditEventBase event) { + return log(Collections.singletonList(event)); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.lang.String) + */ + @Override + public boolean logJSON(String event) { + AuditEventBase eventObj = MiscUtil.fromJson(event, AuthzAuditEvent.class); + + return log(eventObj); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.util.Collection) + */ + @Override + public boolean logJSON(Collection events) { + List eventList = new ArrayList<>(events.size()); + + for (String event : events) { + eventList.add(MiscUtil.fromJson(event, AuthzAuditEvent.class)); + } + + return log(eventList); + } + + @Override + public boolean logFile(File file) { + return false; + } + + @Override + public void init(Properties props) { + init(props, null); + } + + @Override + public void init(Properties props, String basePropertyName) { + LOG.info("BaseAuditProvider.init()"); + + this.props = props; + + if (basePropertyName != null) { + propPrefix = basePropertyName; + } + + LOG.info("propPrefix={}", propPrefix); + + String name = MiscUtil.getStringProperty(props, basePropertyName + "." + PROP_NAME); + + if (name != null && !name.isEmpty()) { + setName(name); + } + + // Get final token + if (providerName == null) { + List tokens = MiscUtil.toArray(propPrefix, "."); + + if (!tokens.isEmpty()) { + String finalToken = tokens.get(tokens.size() - 1); + + setName(finalToken); + + LOG.info("Using providerName from property prefix. 
providerName={}", getName()); + } + } + + LOG.info("providerName={}", getName()); + + mLogFailureReportMinIntervalInMs = MiscUtil.getIntProperty(props, AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP, 60 * 1000); + + boolean globalStatusLogEnabled = MiscUtil.getBooleanProperty(props, AUDIT_LOG_STATUS_LOG_ENABLED, DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED); + long globalStatusLogIntervalSec = MiscUtil.getLongProperty(props, AUDIT_LOG_STATUS_LOG_INTERVAL_SEC, DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC); + + statusLogEnabled = MiscUtil.getBooleanProperty(props, basePropertyName + ".status.log.enabled", globalStatusLogEnabled); + statusLogIntervalMS = MiscUtil.getLongProperty(props, basePropertyName + ".status.log.interval.sec", globalStatusLogIntervalSec) * 1000; + nextStatusLogTime = lastStatusLogTime + statusLogIntervalMS; + + LOG.info("{}={}", AUDIT_LOG_STATUS_LOG_ENABLED, globalStatusLogEnabled); + LOG.info("{}={}", AUDIT_LOG_STATUS_LOG_INTERVAL_SEC, globalStatusLogIntervalSec); + LOG.info("{}.status.log.enabled={}", basePropertyName, statusLogEnabled); + LOG.info("{}.status.log.interval.sec={}", basePropertyName, (statusLogIntervalMS / 1000)); + + String configPropsNamePrefix = propPrefix + "." + PROP_CONFIG + "."; + + for (Object propNameObj : props.keySet()) { + String propName = propNameObj.toString(); + + if (!propName.startsWith(configPropsNamePrefix)) { + continue; + } + + String configName = propName.substring(configPropsNamePrefix.length()); + String configValue = props.getProperty(propName); + + configProps.put(configName, configValue); + + LOG.info("Found Config property: {} => {}", configName, configValue); + } + } + + @Override + public String getName() { + if (parentPath != null) { + return parentPath + "." 
+ providerName; + } + + return providerName; + } + + public void setName(String name) { + providerName = name; + } + + public String getParentPath() { + return parentPath; + } + + public void setParentPath(String parentPath) { + this.parentPath = parentPath; + } + + public String getFinalPath() { + return getName(); + } + + public long addTotalCount(int count) { + totalCount += count; + + return totalCount; + } + + public long addSuccessCount(int count) { + totalSuccessCount += count; + + return totalSuccessCount; + } + + public long addFailedCount(int count) { + totalFailedCount += count; + + return totalFailedCount; + } + + public long addStashedCount(int count) { + totalStashedCount += count; + + return totalStashedCount; + } + + public long addDeferredCount(int count) { + totalDeferredCount += count; + + return totalDeferredCount; + } + + public long getTotalCount() { + return totalCount; + } + + public long getTotalSuccessCount() { + return totalSuccessCount; + } + + public long getTotalFailedCount() { + return totalFailedCount; + } + + public long getTotalStashedCount() { + return totalStashedCount; + } + + public long getLastStashedCount() { + return lastStashedCount; + } + + public long getTotalDeferredCount() { + return totalDeferredCount; + } + + public long getLastDeferredCount() { + return lastDeferredCount; + } + + public boolean isStatusLogEnabled() { + return statusLogEnabled; + } + public void logStatusIfRequired() { + if (System.currentTimeMillis() > nextStatusLogTime) { + logStatus(); + } + } + + public void logStatus() { + try { + long currTime = System.currentTimeMillis(); + long diffTime = currTime - lastStatusLogTime; + + lastStatusLogTime = currTime; + nextStatusLogTime = currTime + statusLogIntervalMS; + + long diffCount = totalCount - lastIntervalCount; + long diffSuccess = totalSuccessCount - lastIntervalSuccessCount; + long diffFailed = totalFailedCount - lastIntervalFailedCount; + long diffStashed = totalStashedCount - lastStashedCount; 
+ long diffDeferred = totalDeferredCount - lastDeferredCount; + + if (diffCount == 0 && diffSuccess == 0 && diffFailed == 0 && diffStashed == 0 && diffDeferred == 0) { + return; + } + + lastIntervalCount = totalCount; + lastIntervalSuccessCount = totalSuccessCount; + lastIntervalFailedCount = totalFailedCount; + lastStashedCount = totalStashedCount; + lastDeferredCount = totalDeferredCount; + + if (statusLogEnabled) { + String finalPath = ""; + String tFinalPath = getFinalPath(); + + if (!getName().equals(tFinalPath)) { + finalPath = ", finalDestination=" + tFinalPath; + } + + logAuditStatus(diffTime, diffCount, diffSuccess, diffFailed, diffStashed, diffDeferred, finalPath); + } + } catch (Exception t) { + LOG.error("Error while printing stats. auditProvider={}", getName(), t); + } + } + + public void logError(String msg, Object...args) { + long currTimeMS = System.currentTimeMillis(); + + if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { + LOG.error(msg, args); + + lastErrorLogMS = currTimeMS; + } + } + + public void logError(String msg, Throwable ex) { + long currTimeMS = System.currentTimeMillis(); + + if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { + LOG.error(msg, ex); + + lastErrorLogMS = currTimeMS; + } + } + + public String getTimeDiffStr(long time1, long time2) { + long timeInMs = Math.abs(time1 - time2); + + return formatIntervalForLog(timeInMs); + } + + public String formatIntervalForLog(long timeInMs) { + long hours = timeInMs / (60 * 60 * 1000); + long minutes = (timeInMs / (60 * 1000)) % 60; + long seconds = (timeInMs % (60 * 1000)) / 1000; + long mSeconds = (timeInMs % (1000)); + + if (hours > 0) { + return String.format("%02d:%02d:%02d.%03d hours", hours, minutes, seconds, mSeconds); + } else if (minutes > 0) { + return String.format("%02d:%02d.%03d minutes", minutes, seconds, mSeconds); + } else if (seconds > 0) { + return String.format("%02d.%03d seconds", seconds, mSeconds); + } else { + return String.format("%03d milli-seconds", 
mSeconds); + } + } + + public void logFailedEvent(AuditEventBase event) { + logFailedEvent(event, ""); + } + + public void logFailedEvent(AuditEventBase event, Throwable excp) { + long now = System.currentTimeMillis(); + long timeSinceLastReport = now - mFailedLogLastReportTime.get(); + long countSinceLastReport = mFailedLogCountSinceLastReport.incrementAndGet(); + long countLifeTime = mFailedLogCountLifeTime.incrementAndGet(); + + if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) { + mFailedLogLastReportTime.set(now); + mFailedLogCountSinceLastReport.set(0); + + if (excp != null) { + LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, MiscUtil.stringify(event), excp); + } else { + LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, MiscUtil.stringify(event)); + } + + if (countLifeTime > 1) { // no stats to print for the 1st failure + LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime); + } + } + } + + public void logFailedEvent(Collection events) { + logFailedEvent(events, ""); + } + + public void logFailedEvent(Collection events, Throwable excp) { + for (AuditEventBase event : events) { + logFailedEvent(event, excp); + } + } + + public void logFailedEvent(AuditEventBase event, String message) { + long now = System.currentTimeMillis(); + long timeSinceLastReport = now - mFailedLogLastReportTime.get(); + long countSinceLastReport = mFailedLogCountSinceLastReport.incrementAndGet(); + long countLifeTime = mFailedLogCountLifeTime.incrementAndGet(); + + if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) { + mFailedLogLastReportTime.set(now); + mFailedLogCountSinceLastReport.set(0); + + LOG.warn("failed to log audit event: {} , errorMessage={}", MiscUtil.stringify(event), message); + + if (countLifeTime > 1) { // no stats to print for the 1st failure + LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, 
formatIntervalForLog(timeSinceLastReport), countLifeTime); + } + } + } + + public void logFailedEvent(Collection events, String errorMessage) { + for (AuditEventBase event : events) { + logFailedEvent(event, errorMessage); + } + } + + public void logFailedEventJSON(String event, Throwable excp) { + long now = System.currentTimeMillis(); + long timeSinceLastReport = now - mFailedLogLastReportTime.get(); + long countSinceLastReport = mFailedLogCountSinceLastReport.incrementAndGet(); + long countLifeTime = mFailedLogCountLifeTime.incrementAndGet(); + + if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) { + mFailedLogLastReportTime.set(now); + mFailedLogCountSinceLastReport.set(0); + + if (excp != null) { + LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, event, excp); + } else { + LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, event); + } + + if (countLifeTime > 1) { // no stats to print for the 1st failure + LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime); + } + } + } + + public void logFailedEventJSON(Collection events, Throwable excp) { + for (String event : events) { + logFailedEventJSON(event, excp); + } + } + + private void logAuditStatus(long diffTime, long diffCount, long diffSuccess, long diffFailed, long diffStashed, long diffDeferred, String finalPath) { + String msg = "Audit Status Log: name=" + + getName() + + finalPath + + ", interval=" + + formatIntervalForLog(diffTime) + + ", events=" + + diffCount + + (diffSuccess > 0 ? (", successCount=" + diffSuccess) + : "") + + (diffFailed > 0 ? (", failedCount=" + diffFailed) : "") + + (diffStashed > 0 ? (", stashedCount=" + diffStashed) : "") + + (diffDeferred > 0 ? (", deferredCount=" + diffDeferred) + : "") + + ", totalEvents=" + + totalCount + + (totalSuccessCount > 0 ? (", totalSuccessCount=" + totalSuccessCount) + : "") + + (totalFailedCount > 0 ? 
(", totalFailedCount=" + totalFailedCount) + : "") + + (totalStashedCount > 0 ? (", totalStashedCount=" + totalStashedCount) + : "") + + (totalDeferredCount > 0 ? (", totalDeferredCount=" + totalDeferredCount) + : ""); + LOG.info(msg); + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java index c80d045b06..c5dccd2a5e 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,104 +17,104 @@ */ package org.apache.ranger.audit.provider; -import java.util.Collection; - import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.model.AuthzAuditEvent; +import java.util.Collection; + public abstract class BufferedAuditProvider extends BaseAuditHandler { - private LogBuffer mBuffer = null; - private LogDestination mDestination = null; - - @Override - public boolean log(AuditEventBase event) { - if (event instanceof AuthzAuditEvent) { - AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; - - if (authzEvent.getAgentHostname() == null) { - authzEvent.setAgentHostname(MiscUtil.getHostname()); - } - - if (authzEvent.getLogType() == null) { - authzEvent.setLogType("RangerAudit"); - } - - if (authzEvent.getEventId() == null) { - authzEvent.setEventId(MiscUtil.generateUniqueId()); - } - } - - if (!mBuffer.add(event)) { - logFailedEvent(event); - return false; - } - return true; - } - - @Override - public boolean log(Collection events) { - boolean ret = true; - for (AuditEventBase event : events) { - ret = log(event); - if (!ret) { - break; - } - } - return ret; - } - - @Override - public boolean logJSON(String event) { - AuditEventBase eventObj = MiscUtil.fromJson(event, - AuthzAuditEvent.class); - return log(eventObj); - } - - @Override - public boolean logJSON(Collection events) { - boolean ret = true; - for (String event : events) { - ret = logJSON(event); - if (!ret) { - break; - } - } - return ret; - } - - @Override - public void start() { - mBuffer.start(mDestination); - } - - @Override - public void stop() { - mBuffer.stop(); - } - - @Override - public void waitToComplete() { - } - - @Override - public void waitToComplete(long timeout) { - } - - @Override - public void flush() { - } - - protected 
LogBuffer getBuffer() { - return mBuffer; - } - - protected LogDestination getDestination() { - return mDestination; - } - - protected void setBufferAndDestination(LogBuffer buffer, - LogDestination destination) { - mBuffer = buffer; - mDestination = destination; - } + private LogBuffer mBuffer; + private LogDestination mDestination; + + @Override + public boolean log(AuditEventBase event) { + if (event instanceof AuthzAuditEvent) { + AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; + + if (authzEvent.getAgentHostname() == null) { + authzEvent.setAgentHostname(MiscUtil.getHostname()); + } + + if (authzEvent.getLogType() == null) { + authzEvent.setLogType("RangerAudit"); + } + + if (authzEvent.getEventId() == null) { + authzEvent.setEventId(MiscUtil.generateUniqueId()); + } + } + + if (!mBuffer.add(event)) { + logFailedEvent(event); + return false; + } + return true; + } + + @Override + public boolean logJSON(String event) { + AuditEventBase eventObj = MiscUtil.fromJson(event, + AuthzAuditEvent.class); + return log(eventObj); + } + + @Override + public boolean logJSON(Collection events) { + boolean ret = true; + for (String event : events) { + ret = logJSON(event); + if (!ret) { + break; + } + } + return ret; + } + + @Override + public boolean log(Collection events) { + boolean ret = true; + for (AuditEventBase event : events) { + ret = log(event); + if (!ret) { + break; + } + } + return ret; + } + + @Override + public void start() { + mBuffer.start(mDestination); + } + + @Override + public void stop() { + mBuffer.stop(); + } + + @Override + public void waitToComplete() { + } + + @Override + public void waitToComplete(long timeout) { + } + + @Override + public void flush() { + } + + protected LogBuffer getBuffer() { + return mBuffer; + } + + protected LogDestination getDestination() { + return mDestination; + } + + protected void setBufferAndDestination(LogBuffer buffer, + LogDestination destination) { + mBuffer = buffer; + mDestination = destination; + } } 
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DebugTracer.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DebugTracer.java index 7396fd0487..dbf591403b 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DebugTracer.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DebugTracer.java @@ -17,12 +17,19 @@ package org.apache.ranger.audit.provider; public interface DebugTracer { - void debug(String msg); - void debug(String msg, Throwable excp); - void info(String msg); - void info(String msg, Throwable excp); - void warn(String msg); - void warn(String msg, Throwable excp); - void error(String msg); - void error(String msg, Throwable excp); + void debug(String msg); + + void debug(String msg, Throwable excp); + + void info(String msg); + + void info(String msg, Throwable excp); + + void warn(String msg); + + void warn(String msg, Throwable excp); + + void error(String msg); + + void error(String msg, Throwable excp); } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java index cbd25ab7c7..d9af9ce5cd 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,99 +17,99 @@ */ package org.apache.ranger.audit.provider; -import java.io.File; -import java.util.Collection; -import java.util.Properties; - import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.model.AuthzAuditEvent; +import java.io.File; +import java.util.Collection; +import java.util.Properties; public class DummyAuditProvider implements AuditHandler { - @Override - public void init(Properties prop) { - // intentionally left empty - } - - @Override - public boolean log(AuditEventBase event) { - // intentionally left empty - return true; - } - - @Override - public boolean log(Collection events) { - for (AuditEventBase event : events) { - log(event); - } - return true; - } - - @Override - public boolean logJSON(String event) { - AuditEventBase eventObj = MiscUtil.fromJson(event, - AuthzAuditEvent.class); - return log(eventObj); - } - - @Override - public boolean logJSON(Collection events) { - for (String event : events) { - logJSON(event); - } - return false; - } - - @Override - public void start() { - // intentionally left empty - } - - @Override - public void stop() { - // intentionally left empty - } - - @Override - public void waitToComplete() { - // intentionally left empty - } - - @Override - public void flush() { - // intentionally left empty - } - - /* (non-Javadoc) - * @see org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties, java.lang.String) - */ - @Override - public void init(Properties prop, String basePropertyName) { - // intentionally left empty - } - - /* (non-Javadoc) - * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete(long) - */ - @Override - public void waitToComplete(long timeout) { - // intentionally left empty - } - - /* (non-Javadoc) - * @see 
org.apache.ranger.audit.provider.AuditProvider#getName() - */ - @Override - public String getName() { - return this.getClass().getName(); - } - - /* (non-Javadoc) - * @see org.apache.ranger.audit.provider.AuditProvider#getAuditFileType() - */ - @Override - public boolean logFile(File file) { - return logFile(file); - } - + @Override + public boolean log(AuditEventBase event) { + // intentionally left empty + return true; + } + + @Override + public boolean log(Collection events) { + for (AuditEventBase event : events) { + log(event); + } + + return true; + } + + @Override + public boolean logJSON(String event) { + AuditEventBase eventObj = MiscUtil.fromJson(event, AuthzAuditEvent.class); + + return log(eventObj); + } + + @Override + public boolean logJSON(Collection events) { + for (String event : events) { + logJSON(event); + } + + return false; + } + + /* (non-Javadoc) + * @see org.apache.ranger.audit.provider.AuditProvider#getAuditFileType() + */ + @Override + public boolean logFile(File file) { + return false; + } + + @Override + public void init(Properties prop) { + // intentionally left empty + } + + /* (non-Javadoc) + * @see org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties, java.lang.String) + */ + @Override + public void init(Properties prop, String basePropertyName) { + // intentionally left empty + } + + @Override + public void start() { + // intentionally left empty + } + + @Override + public void stop() { + // intentionally left empty + } + + @Override + public void waitToComplete() { + // intentionally left empty + } + + /* (non-Javadoc) + * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete(long) + */ + @Override + public void waitToComplete(long timeout) { + // intentionally left empty + } + + /* (non-Javadoc) + * @see org.apache.ranger.audit.provider.AuditProvider#getName() + */ + @Override + public String getName() { + return this.getClass().getName(); + } + + @Override + public void flush() { + // 
intentionally left empty + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java index d720ebcccb..e08c5b8085 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java @@ -18,10 +18,11 @@ */ package org.apache.ranger.audit.provider; +import org.apache.hadoop.security.UserGroupInformation; + import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; -import java.io.FileFilter; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; @@ -34,658 +35,642 @@ import java.io.Writer; import java.security.PrivilegedExceptionAction; import java.util.Arrays; -import java.util.Comparator; import java.util.TreeSet; -import org.apache.hadoop.security.UserGroupInformation; +public class LocalFileLogBuffer implements LogBuffer { + private String mDirectory; + private String mFile; + private int mFlushIntervalSeconds = 1 * 60; + private int mFileBufferSizeBytes = 8 * 1024; + private String mEncoding; + private boolean mIsAppend = true; + private int mRolloverIntervalSeconds = 10 * 60; + private String mArchiveDirectory; + private int mArchiveFileCount = 10; + private final DebugTracer mLogger; + private Writer mWriter; + private String mBufferFilename; + private long mNextRolloverTime; + private long mNextFlushTime; + private final int mFileOpenRetryIntervalInMs = 60 * 1000; + private long mNextFileOpenRetryTime; + private DestinationDispatcherThread mDispatcherThread; -public class LocalFileLogBuffer implements LogBuffer { - private String mDirectory = null; - private String mFile = null; - private int mFlushIntervalSeconds = 1 * 60; - private int mFileBufferSizeBytes = 8 * 1024; - private String mEncoding = null; - private boolean mIsAppend = true; - 
private int mRolloverIntervalSeconds = 10 * 60; - private String mArchiveDirectory = null; - private int mArchiveFileCount = 10; - private DebugTracer mLogger = null; + public LocalFileLogBuffer(DebugTracer tracer) { + mLogger = tracer; + } - private Writer mWriter = null; - private String mBufferFilename = null; - private long mNextRolloverTime = 0; - private long mNextFlushTime = 0; - private int mFileOpenRetryIntervalInMs = 60 * 1000; - private long mNextFileOpenRetryTime = 0; - - private DestinationDispatcherThread mDispatcherThread = null; - - public LocalFileLogBuffer(DebugTracer tracer) { - mLogger = tracer; - } - - public String getDirectory() { - return mDirectory; - } - - public void setDirectory(String directory) { - mDirectory = directory; - } - - public String getFile() { - return mFile; - } - - public void setFile(String file) { - mFile = file; - } - - public int getFileBufferSizeBytes() { - return mFileBufferSizeBytes; - } - - public void setFileBufferSizeBytes(int fileBufferSizeBytes) { - mFileBufferSizeBytes = fileBufferSizeBytes; - } - - public int getFlushIntervalSeconds() { - return mFlushIntervalSeconds; - } - - public void setFlushIntervalSeconds(int flushIntervalSeconds) { - mFlushIntervalSeconds = flushIntervalSeconds; - } - - public String getEncoding() { - return mEncoding; - } - - public void setEncoding(String encoding) { - mEncoding = encoding; - } - - public boolean getIsAppend() { - return mIsAppend; - } - - public void setIsAppend(boolean isAppend) { - mIsAppend = isAppend; - } - - public int getRolloverIntervalSeconds() { - return mRolloverIntervalSeconds; - } - - public void setRolloverIntervalSeconds(int rolloverIntervalSeconds) { - mRolloverIntervalSeconds = rolloverIntervalSeconds; - } - - public String getArchiveDirectory() { - return mArchiveDirectory; - } - - public void setArchiveDirectory(String archiveDirectory) { - mArchiveDirectory = archiveDirectory; - } - - public int getArchiveFileCount() { - return mArchiveFileCount; 
- } - - public void setArchiveFileCount(int archiveFileCount) { - mArchiveFileCount = archiveFileCount; - } - - - @Override - public void start(LogDestination destination) { - mLogger.debug("==> LocalFileLogBuffer.start()"); - - mDispatcherThread = new DestinationDispatcherThread(this, destination, mLogger); - - mDispatcherThread.setDaemon(true); - - mDispatcherThread.start(); - - mLogger.debug("<== LocalFileLogBuffer.start()"); - } - - @Override - public void stop() { - mLogger.debug("==> LocalFileLogBuffer.stop()"); - - DestinationDispatcherThread dispatcherThread = mDispatcherThread; - mDispatcherThread = null; - - if(dispatcherThread != null && dispatcherThread.isAlive()) { - dispatcherThread.stopThread(); - - try { - dispatcherThread.join(); - } catch (InterruptedException e) { - mLogger.warn("LocalFileLogBuffer.stop(): failed in waiting for DispatcherThread", e); - } - } + public String getDirectory() { + return mDirectory; + } - closeFile(); - - mLogger.debug("<== LocalFileLogBuffer.stop()"); - } - - @Override - public boolean isAvailable() { - return mWriter != null; - } - - @Override - public boolean add(T log) { - boolean ret = false; - - String msg = MiscUtil.stringify(log); - - if(msg.contains(MiscUtil.LINE_SEPARATOR)) { - msg = msg.replace(MiscUtil.LINE_SEPARATOR, MiscUtil.ESCAPE_STR + MiscUtil.LINE_SEPARATOR); - } + public void setDirectory(String directory) { + mDirectory = directory; + } - synchronized(this) { - checkFileStatus(); - - Writer writer = mWriter; - - if(writer != null) { - try { - writer.write(msg + MiscUtil.LINE_SEPARATOR); - - if(mFileBufferSizeBytes == 0) { - writer.flush(); - } - - ret = true; - } catch(IOException excp) { - mLogger.warn("LocalFileLogBuffer.add(): write failed", excp); + public String getFile() { + return mFile; + } - closeFile(); - } - } - } - - return ret; - } - - @Override - public boolean isEmpty() { - return mDispatcherThread == null || mDispatcherThread.isIdle(); - } - - private synchronized void openFile() { 
- mLogger.debug("==> LocalFileLogBuffer.openFile()"); - - long now = System.currentTimeMillis(); - - closeFile(); - - if(mNextFileOpenRetryTime <= now) { - try { - mNextRolloverTime = MiscUtil.getNextRolloverTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); - - long startTime = MiscUtil.getRolloverStartTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); - - mBufferFilename = MiscUtil.replaceTokens(mDirectory + File.separator + mFile, startTime); - - MiscUtil.createParents(new File(mBufferFilename)); - - FileOutputStream ostream = null; - try { - ostream = new FileOutputStream(mBufferFilename, mIsAppend); - } catch(Exception excp) { - mLogger.warn("LocalFileLogBuffer.openFile(): failed to open file " + mBufferFilename, excp); - } - - if(ostream != null) { - mWriter = createWriter(ostream); - mLogger.debug("LocalFileLogBuffer.openFile(): opened file " + mBufferFilename); - mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L); - } else { - mLogger.warn("LocalFileLogBuffer.openFile(): failed to open file for write " + mBufferFilename); - mBufferFilename = null; - } - } finally { - if(mWriter == null) { - mNextFileOpenRetryTime = now + mFileOpenRetryIntervalInMs; - } - } - } - - mLogger.debug("<== LocalFileLogBuffer.openFile()"); - } - - private synchronized void closeFile() { - mLogger.debug("==> LocalFileLogBuffer.closeFile()"); - - Writer writer = mWriter; - - mWriter = null; - - if(writer != null) { - try { - writer.flush(); - writer.close(); - } catch(IOException excp) { - mLogger.warn("LocalFileLogBuffer: failed to close file " + mBufferFilename, excp); - } - - if(mDispatcherThread != null) { - mDispatcherThread.addLogfile(mBufferFilename); - } - } - - mLogger.debug("<== LocalFileLogBuffer.closeFile()"); - } - - private void rollover() { - mLogger.debug("==> LocalFileLogBuffer.rollover()"); - - closeFile(); - - openFile(); - - mLogger.debug("<== LocalFileLogBuffer.rollover()"); - } - - private void checkFileStatus() { - 
long now = System.currentTimeMillis(); - - if(now > mNextRolloverTime) { - rollover(); - } else if(mWriter == null) { - openFile(); - } else if(now > mNextFlushTime) { - try { - mNextFlushTime = now + (mFlushIntervalSeconds * 1000L); - - mWriter.flush(); - } catch (IOException excp) { - mLogger.warn("LocalFileLogBuffer: failed to flush to file " + mBufferFilename, excp); - } - } - } - - private Writer createWriter(OutputStream os ) { - Writer writer = null; - - if(os != null) { - if(mEncoding != null) { - try { - writer = new OutputStreamWriter(os, mEncoding); - } catch(UnsupportedEncodingException excp) { - mLogger.warn("LocalFileLogBuffer: failed to create output writer for file " + mBufferFilename, excp); - } - } - - if(writer == null) { - writer = new OutputStreamWriter(os); - } - - if(mFileBufferSizeBytes > 0 && writer != null) { - writer = new BufferedWriter(writer, mFileBufferSizeBytes); - } - } - - return writer; - } - - boolean isCurrentFilename(String filename) { - return filename != null && filename.equals(mBufferFilename); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - - sb.append("LocalFileLogBuffer {"); - sb.append("Directory=").append(mDirectory).append("; "); - sb.append("File=").append(mFile).append("; "); - sb.append("RolloverIntervaSeconds=").append(mRolloverIntervalSeconds).append("; "); - sb.append("ArchiveDirectory=").append(mArchiveDirectory).append("; "); - sb.append("ArchiveFileCount=").append(mArchiveFileCount); - sb.append("}"); - - return sb.toString(); - } - -} + public void setFile(String file) { + mFile = file; + } -class DestinationDispatcherThread extends Thread { - private TreeSet mCompletedLogfiles = new TreeSet(); - private boolean mStopThread = false; - private LocalFileLogBuffer mFileLogBuffer = null; - private LogDestination mDestination = null; - private DebugTracer mLogger = null; - - private String mCurrentLogfile = null; - - public DestinationDispatcherThread(LocalFileLogBuffer 
fileLogBuffer, LogDestination destination, DebugTracer tracer) { - super(DestinationDispatcherThread.class.getSimpleName() + "-" + System.currentTimeMillis()); - - mLogger = tracer; - - mFileLogBuffer = fileLogBuffer; - mDestination = destination; - - setDaemon(true); - } - - public void addLogfile(String filename) { - mLogger.debug("==> DestinationDispatcherThread.addLogfile(" + filename + ")"); - - if(filename != null) { - synchronized(mCompletedLogfiles) { - mCompletedLogfiles.add(filename); - mCompletedLogfiles.notifyAll(); - } - } - - mLogger.debug("<== DestinationDispatcherThread.addLogfile(" + filename + ")"); - } - - public void stopThread() { - mStopThread = true; - } - - public boolean isIdle() { - synchronized(mCompletedLogfiles) { - return mCompletedLogfiles.isEmpty() && mCurrentLogfile == null; - } - } - - @Override - public void run() { - UserGroupInformation loginUser = null; - - try { - loginUser = UserGroupInformation.getLoginUser(); - } catch (IOException excp) { - mLogger.error("DestinationDispatcherThread.run(): failed to get login user details. Audit files will not be sent to HDFS destination", excp); - } - - if(loginUser == null) { - mLogger.error("DestinationDispatcherThread.run(): failed to get login user. Audit files will not be sent to HDFS destination"); - - return; - } - - try { - loginUser.doAs((PrivilegedExceptionAction) () -> { - doRun(); - - return 0; - }); - } catch (Exception excp) { - mLogger.error("DestinationDispatcherThread.run(): failed", excp); - } - } - - private void doRun() { - init(); - - mDestination.start(); - - long pollIntervalInMs = 1000L; - - while(! 
mStopThread) { - synchronized(mCompletedLogfiles) { - while(mCompletedLogfiles.isEmpty() && !mStopThread) { - try { - mCompletedLogfiles.wait(pollIntervalInMs); - } catch(InterruptedException excp) { - throw new RuntimeException("DestinationDispatcherThread.run(): failed to wait for log file", excp); - } - } - - mCurrentLogfile = mCompletedLogfiles.pollFirst(); - } - - if(mCurrentLogfile != null) { - sendCurrentFile(); - } - } - - mDestination.stop(); - } - - private void init() { - mLogger.debug("==> DestinationDispatcherThread.init()"); - - String dirName = MiscUtil.replaceTokens(mFileLogBuffer.getDirectory(), 0); - - if(dirName != null) { - File directory = new File(dirName); - - if(directory.exists() && directory.isDirectory()) { - File[] files = directory.listFiles(); - - if(files != null) { - for(File file : files) { - if(file.exists() && file.isFile() && file.canRead()) { - String filename = file.getAbsolutePath(); - if(! mFileLogBuffer.isCurrentFilename(filename)) { - addLogfile(filename); - } - } - } - } - } - } - - mLogger.debug("<== DestinationDispatcherThread.init()"); - } - - private boolean sendCurrentFile() { - mLogger.debug("==> DestinationDispatcherThread.sendCurrentFile()"); - - boolean ret = false; - - long destinationPollIntervalInMs = 1000L; - - BufferedReader reader = openCurrentFile(); - try { - while(!mStopThread) { - String log = getNextStringifiedLog(reader); - - if(log == null) { // reached end-of-file - ret = true; - - break; - } - - try { - // loop until log is sent successfully - while(!mStopThread && !mDestination.sendStringified(log)) { - try { - Thread.sleep(destinationPollIntervalInMs); - } catch(InterruptedException excp) { - throw new RuntimeException("LocalFileLogBuffer.sendCurrentFile(" + mCurrentLogfile + "): failed while waiting for destination to be available", excp); - } - } - } catch ( AuditMessageException msgError) { - mLogger.error("Error in log message:" + log); - //If there is error in log message, then it will be 
skipped - } - } - } finally { - closeCurrentFile(reader); - } - - if(!mStopThread) { - mDestination.flush(); - archiveCurrentFile(); - } - - mLogger.debug("<== DestinationDispatcherThread.sendCurrentFile()"); - - return ret; - } - - private String getNextStringifiedLog(BufferedReader mReader) { - String log = null; - - if(mReader != null) { - try { - while(true) { - String line = mReader.readLine(); - - if(line == null) { // reached end-of-file - break; - } - - if(line.endsWith(MiscUtil.ESCAPE_STR)) { - line = line.substring(0, line.length() - MiscUtil.ESCAPE_STR.length()); - - if(log == null) { - log = line; - } else { - log += MiscUtil.LINE_SEPARATOR; - log += line; - } - - continue; - } else { - if(log == null) { - log = line; - } else { - log += line; - } - break; - } - } - } catch (IOException excp) { - mLogger.warn("getNextStringifiedLog.getNextLog(): failed to read from file " + mCurrentLogfile, excp); - } - } - - return log; - } - - private BufferedReader openCurrentFile() { - mLogger.debug("==> openCurrentFile(" + mCurrentLogfile + ")"); - BufferedReader mReader = null; - - if(mCurrentLogfile != null) { - try { - FileInputStream inStr = new FileInputStream(mCurrentLogfile); - - InputStreamReader strReader = createReader(inStr); - - if(strReader != null) { - mReader = new BufferedReader(strReader); - } - } catch(FileNotFoundException excp) { - mLogger.warn("openNextFile(): error while opening file " + mCurrentLogfile, excp); - } - } - - mLogger.debug("<== openCurrentFile(" + mCurrentLogfile + ")"); - return mReader; - } - - private void closeCurrentFile(BufferedReader mReader) { - mLogger.debug("==> closeCurrentFile(" + mCurrentLogfile + ")"); - - if(mReader != null) { - try { - mReader.close(); - } catch(IOException excp) { - // ignore - } - } - - mLogger.debug("<== closeCurrentFile(" + mCurrentLogfile + ")"); - } - - private void archiveCurrentFile() { - if(mCurrentLogfile != null) { - File logFile = new File(mCurrentLogfile); - String archiveDirName = 
MiscUtil.replaceTokens(mFileLogBuffer.getArchiveDirectory(), 0); - String archiveFilename = archiveDirName + File.separator +logFile.getName(); - - try { - if(logFile.exists()) { - File archiveFile = new File(archiveFilename); - - MiscUtil.createParents(archiveFile); - - if(! logFile.renameTo(archiveFile)) { - // TODO: renameTo() does not work in all cases. in case of failure, copy the file contents to the destination and delete the file - mLogger.warn("archiving failed to move file: " + mCurrentLogfile + " ==> " + archiveFilename); - } - - File archiveDir = new File(archiveDirName); - File[] files = archiveDir.listFiles(new FileFilter() { - @Override - public boolean accept(File f) { - return f.isFile(); - } - }); - - int numOfFilesToDelete = files == null ? 0 : (files.length - mFileLogBuffer.getArchiveFileCount()); - - if(numOfFilesToDelete > 0) { - Arrays.sort(files, new Comparator() { - @Override - public int compare(File f1, File f2) { - return (int)(f1.lastModified() - f2.lastModified()); - } - }); - - for(int i = 0; i < numOfFilesToDelete; i++) { - if(! 
files[i].delete()) { - mLogger.warn("archiving failed to delete file: " + files[i].getAbsolutePath()); - } - } - } - } - } catch(Exception excp) { - mLogger.warn("archiveCurrentFile(): faile to move " + mCurrentLogfile + " to archive location " + archiveFilename, excp); - } - } - mCurrentLogfile = null; - } - - private InputStreamReader createReader(InputStream iStr) { - InputStreamReader reader = null; - - if(iStr != null) { - String encoding = mFileLogBuffer.getEncoding(); - - if(encoding != null) { - try { - reader = new InputStreamReader(iStr, encoding); - } catch(UnsupportedEncodingException excp) { - mLogger.warn("createReader(): failed to create input reader.", excp); - } - } - - if(reader == null) { - reader = new InputStreamReader(iStr); - } - } - - return reader; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - - sb.append("DestinationDispatcherThread {"); - sb.append("ThreadName=").append(this.getName()).append("; "); - sb.append("CompletedLogfiles.size()=").append(mCompletedLogfiles.size()).append("; "); - sb.append("StopThread=").append(mStopThread).append("; "); - sb.append("CurrentLogfile=").append(mCurrentLogfile); - sb.append("}"); - - return sb.toString(); - } -} + public int getFileBufferSizeBytes() { + return mFileBufferSizeBytes; + } + + public void setFileBufferSizeBytes(int fileBufferSizeBytes) { + mFileBufferSizeBytes = fileBufferSizeBytes; + } + + public int getFlushIntervalSeconds() { + return mFlushIntervalSeconds; + } + + public void setFlushIntervalSeconds(int flushIntervalSeconds) { + mFlushIntervalSeconds = flushIntervalSeconds; + } + + public String getEncoding() { + return mEncoding; + } + + public void setEncoding(String encoding) { + mEncoding = encoding; + } + + public boolean getIsAppend() { + return mIsAppend; + } + + public void setIsAppend(boolean isAppend) { + mIsAppend = isAppend; + } + + public int getRolloverIntervalSeconds() { + return mRolloverIntervalSeconds; + } + + public 
void setRolloverIntervalSeconds(int rolloverIntervalSeconds) { + mRolloverIntervalSeconds = rolloverIntervalSeconds; + } + + public String getArchiveDirectory() { + return mArchiveDirectory; + } + + public void setArchiveDirectory(String archiveDirectory) { + mArchiveDirectory = archiveDirectory; + } + + public int getArchiveFileCount() { + return mArchiveFileCount; + } + + public void setArchiveFileCount(int archiveFileCount) { + mArchiveFileCount = archiveFileCount; + } + + @Override + public void start(LogDestination destination) { + mLogger.debug("==> LocalFileLogBuffer.start()"); + + mDispatcherThread = new DestinationDispatcherThread<>(this, destination, mLogger); + + mDispatcherThread.setDaemon(true); + + mDispatcherThread.start(); + + mLogger.debug("<== LocalFileLogBuffer.start()"); + } + + @Override + public void stop() { + mLogger.debug("==> LocalFileLogBuffer.stop()"); + + DestinationDispatcherThread dispatcherThread = mDispatcherThread; + + mDispatcherThread = null; + + if (dispatcherThread != null && dispatcherThread.isAlive()) { + dispatcherThread.stopThread(); + + try { + dispatcherThread.join(); + } catch (InterruptedException e) { + mLogger.warn("LocalFileLogBuffer.stop(): failed in waiting for DispatcherThread", e); + } + } + + closeFile(); + + mLogger.debug("<== LocalFileLogBuffer.stop()"); + } + + @Override + public boolean isAvailable() { + return mWriter != null; + } + + @Override + public boolean isEmpty() { + return mDispatcherThread == null || mDispatcherThread.isIdle(); + } + + @Override + public boolean add(T log) { + boolean ret = false; + String msg = MiscUtil.stringify(log); + + if (msg.contains(MiscUtil.LINE_SEPARATOR)) { + msg = msg.replace(MiscUtil.LINE_SEPARATOR, MiscUtil.ESCAPE_STR + MiscUtil.LINE_SEPARATOR); + } + + synchronized (this) { + checkFileStatus(); + Writer writer = mWriter; + + if (writer != null) { + try { + writer.write(msg + MiscUtil.LINE_SEPARATOR); + + if (mFileBufferSizeBytes == 0) { + writer.flush(); + } + + ret 
= true; + } catch (IOException excp) { + mLogger.warn("LocalFileLogBuffer.add(): write failed", excp); + + closeFile(); + } + } + } + + return ret; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("LocalFileLogBuffer {"); + sb.append("Directory=").append(mDirectory).append("; "); + sb.append("File=").append(mFile).append("; "); + sb.append("RolloverIntervaSeconds=").append(mRolloverIntervalSeconds).append("; "); + sb.append("ArchiveDirectory=").append(mArchiveDirectory).append("; "); + sb.append("ArchiveFileCount=").append(mArchiveFileCount); + sb.append("}"); + + return sb.toString(); + } + + boolean isCurrentFilename(String filename) { + return filename != null && filename.equals(mBufferFilename); + } + + private synchronized void openFile() { + mLogger.debug("==> LocalFileLogBuffer.openFile()"); + + long now = System.currentTimeMillis(); + + closeFile(); + + if (mNextFileOpenRetryTime <= now) { + try { + mNextRolloverTime = MiscUtil.getNextRolloverTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); + + long startTime = MiscUtil.getRolloverStartTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); + + mBufferFilename = MiscUtil.replaceTokens(mDirectory + File.separator + mFile, startTime); + + MiscUtil.createParents(new File(mBufferFilename)); + + FileOutputStream ostream = null; + + try { + ostream = new FileOutputStream(mBufferFilename, mIsAppend); + } catch (Exception excp) { + mLogger.warn("LocalFileLogBuffer.openFile(): failed to open file " + mBufferFilename, excp); + } + + if (ostream != null) { + mWriter = createWriter(ostream); + + mLogger.debug("LocalFileLogBuffer.openFile(): opened file " + mBufferFilename); + + mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L); + } else { + mLogger.warn("LocalFileLogBuffer.openFile(): failed to open file for write " + mBufferFilename); + + mBufferFilename = null; + } + } finally { + if (mWriter == null) { + 
mNextFileOpenRetryTime = now + mFileOpenRetryIntervalInMs; + } + } + } + + mLogger.debug("<== LocalFileLogBuffer.openFile()"); + } + + private synchronized void closeFile() { + mLogger.debug("==> LocalFileLogBuffer.closeFile()"); + + Writer writer = mWriter; + + mWriter = null; + + if (writer != null) { + try { + writer.flush(); + writer.close(); + } catch (IOException excp) { + mLogger.warn("LocalFileLogBuffer: failed to close file " + mBufferFilename, excp); + } + + if (mDispatcherThread != null) { + mDispatcherThread.addLogfile(mBufferFilename); + } + } + + mLogger.debug("<== LocalFileLogBuffer.closeFile()"); + } + + private void rollover() { + mLogger.debug("==> LocalFileLogBuffer.rollover()"); + + closeFile(); + + openFile(); + + mLogger.debug("<== LocalFileLogBuffer.rollover()"); + } + + private void checkFileStatus() { + long now = System.currentTimeMillis(); + + if (now > mNextRolloverTime) { + rollover(); + } else if (mWriter == null) { + openFile(); + } else if (now > mNextFlushTime) { + try { + mNextFlushTime = now + (mFlushIntervalSeconds * 1000L); + + mWriter.flush(); + } catch (IOException excp) { + mLogger.warn("LocalFileLogBuffer: failed to flush to file " + mBufferFilename, excp); + } + } + } + + private Writer createWriter(OutputStream os) { + Writer writer = null; + + if (os != null) { + if (mEncoding != null) { + try { + writer = new OutputStreamWriter(os, mEncoding); + } catch (UnsupportedEncodingException excp) { + mLogger.warn("LocalFileLogBuffer: failed to create output writer for file " + mBufferFilename, excp); + } + } + + if (writer == null) { + writer = new OutputStreamWriter(os); + } + + if (mFileBufferSizeBytes > 0) { + writer = new BufferedWriter(writer, mFileBufferSizeBytes); + } + } + + return writer; + } + + static class DestinationDispatcherThread extends Thread { + private final TreeSet mCompletedLogfiles = new TreeSet<>(); + private final LocalFileLogBuffer mFileLogBuffer; + private final LogDestination mDestination; + private 
final DebugTracer mLogger; + private String mCurrentLogfile; + private boolean mStopThread; + + public DestinationDispatcherThread(LocalFileLogBuffer fileLogBuffer, LogDestination destination, DebugTracer tracer) { + super(DestinationDispatcherThread.class.getSimpleName() + "-" + System.currentTimeMillis()); + + mLogger = tracer; + mFileLogBuffer = fileLogBuffer; + mDestination = destination; + + setDaemon(true); + } + + public void addLogfile(String filename) { + mLogger.debug("==> DestinationDispatcherThread.addLogfile(" + filename + ")"); + + if (filename != null) { + synchronized (mCompletedLogfiles) { + mCompletedLogfiles.add(filename); + mCompletedLogfiles.notifyAll(); + } + } + + mLogger.debug("<== DestinationDispatcherThread.addLogfile(" + filename + ")"); + } + + public void stopThread() { + mStopThread = true; + } + + public boolean isIdle() { + synchronized (mCompletedLogfiles) { + return mCompletedLogfiles.isEmpty() && mCurrentLogfile == null; + } + } + + @Override + public void run() { + UserGroupInformation loginUser = null; + + try { + loginUser = UserGroupInformation.getLoginUser(); + } catch (IOException excp) { + mLogger.error("DestinationDispatcherThread.run(): failed to get login user details. Audit files will not be sent to HDFS destination", excp); + } + + if (loginUser == null) { + mLogger.error("DestinationDispatcherThread.run(): failed to get login user. 
Audit files will not be sent to HDFS destination"); + + return; + } + + try { + loginUser.doAs((PrivilegedExceptionAction) () -> { + doRun(); + + return 0; + }); + } catch (Exception excp) { + mLogger.error("DestinationDispatcherThread.run(): failed", excp); + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("DestinationDispatcherThread {"); + sb.append("ThreadName=").append(this.getName()).append("; "); + sb.append("CompletedLogfiles.size()=").append(mCompletedLogfiles.size()).append("; "); + sb.append("StopThread=").append(mStopThread).append("; "); + sb.append("CurrentLogfile=").append(mCurrentLogfile); + sb.append("}"); + + return sb.toString(); + } + + private void doRun() { + init(); + + mDestination.start(); + + long pollIntervalInMs = 1000L; + + while (!mStopThread) { + synchronized (mCompletedLogfiles) { + while (mCompletedLogfiles.isEmpty() && !mStopThread) { + try { + mCompletedLogfiles.wait(pollIntervalInMs); + } catch (InterruptedException excp) { + throw new RuntimeException("DestinationDispatcherThread.run(): failed to wait for log file", excp); + } + } + + mCurrentLogfile = mCompletedLogfiles.pollFirst(); + } + + if (mCurrentLogfile != null) { + sendCurrentFile(); + } + } + + mDestination.stop(); + } + + private void init() { + mLogger.debug("==> DestinationDispatcherThread.init()"); + + String dirName = MiscUtil.replaceTokens(mFileLogBuffer.getDirectory(), 0); + + if (dirName != null) { + File directory = new File(dirName); + + if (directory.exists() && directory.isDirectory()) { + File[] files = directory.listFiles(); + + if (files != null) { + for (File file : files) { + if (file.exists() && file.isFile() && file.canRead()) { + String filename = file.getAbsolutePath(); + + if (!mFileLogBuffer.isCurrentFilename(filename)) { + addLogfile(filename); + } + } + } + } + } + } + + mLogger.debug("<== DestinationDispatcherThread.init()"); + } + + private boolean sendCurrentFile() { + 
mLogger.debug("==> DestinationDispatcherThread.sendCurrentFile()"); + + boolean ret = false; + long destinationPollIntervalInMs = 1000L; + + BufferedReader reader = openCurrentFile(); + try { + while (!mStopThread) { + String log = getNextStringifiedLog(reader); + + if (log == null) { // reached end-of-file + ret = true; + + break; + } + + try { + // loop until log is sent successfully + while (!mStopThread && !mDestination.sendStringified(log)) { + try { + Thread.sleep(destinationPollIntervalInMs); + } catch (InterruptedException excp) { + throw new RuntimeException("LocalFileLogBuffer.sendCurrentFile(" + mCurrentLogfile + "): failed while waiting for destination to be available", excp); + } + } + } catch (AuditMessageException msgError) { + mLogger.error("Error in log message:" + log); + //If there is error in log message, then it will be skipped + } + } + } finally { + closeCurrentFile(reader); + } + + if (!mStopThread) { + mDestination.flush(); + archiveCurrentFile(); + } + + mLogger.debug("<== DestinationDispatcherThread.sendCurrentFile()"); + + return ret; + } + + private String getNextStringifiedLog(BufferedReader mReader) { + String log = null; + + if (mReader != null) { + try { + while (true) { + String line = mReader.readLine(); + + if (line == null) { // reached end-of-file + break; + } + + if (line.endsWith(MiscUtil.ESCAPE_STR)) { + line = line.substring(0, line.length() - MiscUtil.ESCAPE_STR.length()); + + if (log == null) { + log = line; + } else { + log += MiscUtil.LINE_SEPARATOR; + log += line; + } + } else { + if (log == null) { + log = line; + } else { + log += line; + } + break; + } + } + } catch (IOException excp) { + mLogger.warn("getNextStringifiedLog.getNextLog(): failed to read from file " + mCurrentLogfile, excp); + } + } + + return log; + } + + private BufferedReader openCurrentFile() { + mLogger.debug("==> openCurrentFile(" + mCurrentLogfile + ")"); + + BufferedReader mReader = null; + + if (mCurrentLogfile != null) { + try { + 
FileInputStream inStr = new FileInputStream(mCurrentLogfile); + + InputStreamReader strReader = createReader(inStr); + + if (strReader != null) { + mReader = new BufferedReader(strReader); + } + } catch (FileNotFoundException excp) { + mLogger.warn("openNextFile(): error while opening file " + mCurrentLogfile, excp); + } + } + + mLogger.debug("<== openCurrentFile(" + mCurrentLogfile + ")"); + + return mReader; + } + + private void closeCurrentFile(BufferedReader mReader) { + mLogger.debug("==> closeCurrentFile(" + mCurrentLogfile + ")"); + + if (mReader != null) { + try { + mReader.close(); + } catch (IOException excp) { + // ignore + } + } + + mLogger.debug("<== closeCurrentFile(" + mCurrentLogfile + ")"); + } + + private void archiveCurrentFile() { + if (mCurrentLogfile != null) { + File logFile = new File(mCurrentLogfile); + String archiveDirName = MiscUtil.replaceTokens(mFileLogBuffer.getArchiveDirectory(), 0); + String archiveFilename = archiveDirName + File.separator + logFile.getName(); + + try { + if (logFile.exists()) { + File archiveFile = new File(archiveFilename); + + MiscUtil.createParents(archiveFile); + + if (!logFile.renameTo(archiveFile)) { + // TODO: renameTo() does not work in all cases. in case of failure, copy the file contents to the destination and delete the file + mLogger.warn("archiving failed to move file: " + mCurrentLogfile + " ==> " + archiveFilename); + } + + File archiveDir = new File(archiveDirName); + File[] files = archiveDir.listFiles(File::isFile); + int numOfFilesToDelete = files == null ? 
0 : (files.length - mFileLogBuffer.getArchiveFileCount()); + + if (numOfFilesToDelete > 0) { + Arrays.sort(files, (f1, f2) -> (int) (f1.lastModified() - f2.lastModified())); + + for (int i = 0; i < numOfFilesToDelete; i++) { + if (!files[i].delete()) { + mLogger.warn("archiving failed to delete file: " + files[i].getAbsolutePath()); + } + } + } + } + } catch (Exception excp) { + mLogger.warn("archiveCurrentFile(): faile to move " + mCurrentLogfile + " to archive location " + archiveFilename, excp); + } + } + + mCurrentLogfile = null; + } + + private InputStreamReader createReader(InputStream iStr) { + InputStreamReader reader = null; + + if (iStr != null) { + String encoding = mFileLogBuffer.getEncoding(); + + if (encoding != null) { + try { + reader = new InputStreamReader(iStr, encoding); + } catch (UnsupportedEncodingException excp) { + mLogger.warn("createReader(): failed to create input reader.", excp); + } + } + + if (reader == null) { + reader = new InputStreamReader(iStr); + } + } + + return reader; + } + } +} diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java index 0a874ef7df..350f778047 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,83 +18,81 @@ package org.apache.ranger.audit.provider; -import java.util.Collection; -import java.util.Properties; - import org.apache.ranger.audit.destination.AuditDestination; import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.model.AuthzAuditEvent; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collection; +import java.util.Properties; public class Log4jAuditProvider extends AuditDestination { + private static final Logger LOG = LoggerFactory.getLogger(Log4jAuditProvider.class); + private static final Logger AUDITLOG = LoggerFactory.getLogger("xaaudit." + Log4jAuditProvider.class.getName()); + + public static final String AUDIT_LOG4J_IS_ASYNC_PROP = "xasecure.audit.log4j.is.async"; + public static final String AUDIT_LOG4J_MAX_QUEUE_SIZE_PROP = "xasecure.audit.log4j.async.max.queue.size"; + public static final String AUDIT_LOG4J_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.log4j.async.max.flush.interval.ms"; + + public Log4jAuditProvider() { + LOG.info("Log4jAuditProvider: creating.."); + } + + @Override + public boolean log(AuditEventBase event) { + if (!AUDITLOG.isInfoEnabled()) { + return true; + } + + if (event != null) { + String eventStr = MiscUtil.stringify(event); + + AUDITLOG.info(eventStr); + } + + return true; + } + + @Override + public boolean logJSON(String event) { + AuditEventBase eventObj = MiscUtil.fromJson(event, AuthzAuditEvent.class); + + return log(eventObj); + } + + @Override + public boolean logJSON(Collection events) { + for (String event : events) { + logJSON(event); + } + + return true; + } + + @Override + public void init(Properties props) { + LOG.info("Log4jAuditProvider.init()"); + + super.init(props); + } + + @Override + public boolean log(Collection events) { 
+ for (AuditEventBase event : events) { + log(event); + } + + return true; + } + + @Override + public void start() { + // intentionally left empty + } - private static final Logger LOG = LoggerFactory.getLogger(Log4jAuditProvider.class); - private static final Logger AUDITLOG = LoggerFactory.getLogger("xaaudit." + Log4jAuditProvider.class.getName()); - - public static final String AUDIT_LOG4J_IS_ASYNC_PROP = "xasecure.audit.log4j.is.async"; - public static final String AUDIT_LOG4J_MAX_QUEUE_SIZE_PROP = "xasecure.audit.log4j.async.max.queue.size"; - public static final String AUDIT_LOG4J_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.log4j.async.max.flush.interval.ms"; - - - public Log4jAuditProvider() { - LOG.info("Log4jAuditProvider: creating.."); - } - - @Override - public void init(Properties props) { - LOG.info("Log4jAuditProvider.init()"); - - super.init(props); - } - - @Override - public boolean log(AuditEventBase event) { - if(! AUDITLOG.isInfoEnabled()) - return true; - - if(event != null) { - String eventStr = MiscUtil.stringify(event); - AUDITLOG.info(eventStr); - } - return true; - } - - @Override - public boolean log(Collection events) { - for (AuditEventBase event : events) { - log(event); - } - return true; - } - - @Override - public boolean logJSON(String event) { - AuditEventBase eventObj = MiscUtil.fromJson(event, - AuthzAuditEvent.class); - return log(eventObj); - } - - @Override - public boolean logJSON(Collection events) { - for (String event : events) { - logJSON(event); - } - return true; - } - - @Override - public void start() { - // intentionally left empty - } - - @Override - public void stop() { - // intentionally left empty - } - - - - + @Override + public void stop() { + // intentionally left empty + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java index cd5befcd2d..d246cd2cb7 100644 --- 
a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java @@ -19,41 +19,41 @@ import org.slf4j.Logger; public class Log4jTracer implements DebugTracer { - private Logger mLogger = null; + private final Logger mLogger; - public Log4jTracer(Logger logger) { - mLogger = logger; - } + public Log4jTracer(Logger logger) { + mLogger = logger; + } - public void debug(String msg) { - mLogger.debug(msg); - } + public void debug(String msg) { + mLogger.debug(msg); + } - public void debug(String msg, Throwable excp) { - mLogger.debug(msg, excp); - } + public void debug(String msg, Throwable excp) { + mLogger.debug(msg, excp); + } - public void info(String msg) { - mLogger.info(msg); - } + public void info(String msg) { + mLogger.info(msg); + } - public void info(String msg, Throwable excp) { - mLogger.info(msg, excp); - } + public void info(String msg, Throwable excp) { + mLogger.info(msg, excp); + } - public void warn(String msg) { - mLogger.warn(msg); - } + public void warn(String msg) { + mLogger.warn(msg); + } - public void warn(String msg, Throwable excp) { - mLogger.warn(msg, excp); - } + public void warn(String msg, Throwable excp) { + mLogger.warn(msg, excp); + } - public void error(String msg) { - mLogger.error(msg); - } + public void error(String msg) { + mLogger.error(msg); + } - public void error(String msg, Throwable excp) { - mLogger.error(msg, excp); - } + public void error(String msg, Throwable excp) { + mLogger.error(msg, excp); + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogBuffer.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogBuffer.java index d6646924f7..ca6d6d5128 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogBuffer.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogBuffer.java @@ -18,15 +18,14 @@ */ package org.apache.ranger.audit.provider; - 
public interface LogBuffer { - void start(LogDestination destination); + void start(LogDestination destination); - void stop(); + void stop(); - boolean isAvailable(); + boolean isAvailable(); - boolean isEmpty(); + boolean isEmpty(); - boolean add(T log); + boolean add(T log); } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogDestination.java index a9c3af938d..4c53662a8e 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LogDestination.java @@ -21,26 +21,26 @@ import org.apache.ranger.audit.model.AuditEventBase; public interface LogDestination { - void start(); + void start(); - void stop(); + void stop(); - boolean isAvailable(); + boolean isAvailable(); - boolean send(AuditEventBase log) throws AuditMessageException; + boolean send(AuditEventBase log) throws AuditMessageException; - boolean send(AuditEventBase[] logs) throws AuditMessageException; + boolean send(AuditEventBase[] logs) throws AuditMessageException; - boolean sendStringified(String log) throws AuditMessageException; + boolean sendStringified(String log) throws AuditMessageException; - boolean sendStringified(String[] logs) throws AuditMessageException; + boolean sendStringified(String[] logs) throws AuditMessageException; - boolean flush(); + boolean flush(); - /** - * Name for the destination - * - * @return - */ - String getName(); + /** + * Name for the destination + * + * @return + */ + String getName(); } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java index 8004b75dde..b4c7bb8a8e 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java @@ -16,6 +16,22 @@ */ 
package org.apache.ranger.audit.provider; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.util.KerberosName; +import org.apache.hadoop.security.authentication.util.KerberosUtil; +import org.apache.ranger.authorization.hadoop.utils.RangerCredentialProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.Subject; +import javax.security.auth.login.AppConfigurationEntry; +import javax.security.auth.login.Configuration; +import javax.security.auth.login.LoginContext; + import java.io.File; import java.io.IOException; import java.net.InetAddress; @@ -28,11 +44,13 @@ import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.util.ArrayList; +import java.util.Arrays; import java.util.Calendar; import java.util.Collections; import java.util.Date; import java.util.GregorianCalendar; import java.util.HashMap; +import java.util.HashSet; import java.util.Hashtable; import java.util.List; import java.util.Map; @@ -44,971 +62,972 @@ import java.util.UUID; import java.util.regex.Pattern; -import javax.security.auth.Subject; -import javax.security.auth.login.AppConfigurationEntry; -import javax.security.auth.login.Configuration; -import javax.security.auth.login.LoginContext; - -import com.fasterxml.jackson.databind.ObjectMapper; -import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.util.KerberosName; -import org.apache.hadoop.security.authentication.util.KerberosUtil; -import org.apache.ranger.authorization.hadoop.utils.RangerCredentialProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import 
com.fasterxml.jackson.core.JsonParser; - import static com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; public class MiscUtil { - private static final Logger logger = LoggerFactory.getLogger(MiscUtil.class); - - public static final String TOKEN_START = "%"; - public static final String TOKEN_END = "%"; - public static final String TOKEN_HOSTNAME = "hostname"; - public static final String TOKEN_APP_TYPE = "app-type"; - public static final String TOKEN_JVM_INSTANCE = "jvm-instance"; - public static final String TOKEN_TIME = "time:"; - public static final String TOKEN_PROPERTY = "property:"; - public static final String TOKEN_ENV = "env:"; - public static final String ESCAPE_STR = "\\"; - - private static final VMID sJvmID = new VMID(); - - public static String LINE_SEPARATOR = System.getProperty("line.separator"); - - static private final ThreadLocal MAPPER = new ThreadLocal() { - @Override - protected ObjectMapper initialValue() { - ObjectMapper objectMapper = new ObjectMapper(); - SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); - objectMapper.setDateFormat(dateFormat); - objectMapper.configure(FAIL_ON_UNKNOWN_PROPERTIES, false); - objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true); - return objectMapper; - } - } ; - - static public ObjectMapper getMapper() { - return MAPPER.get(); - } - private static String sApplicationType = null; - private static UserGroupInformation ugiLoginUser = null; - private static Subject subjectLoginUser = null; - private static String local_hostname = null; - - private static Map logHistoryList = new Hashtable(); - private static int logInterval = 30000; // 30 seconds - - static { - initLocalHost(); - } - - public static String replaceTokens(String str, long time) { - if (str == null) { - return str; - } - - if (time <= 0) { - time = System.currentTimeMillis(); - } - - for (int startPos = 0; 
startPos < str.length();) { - int tagStartPos = str.indexOf(TOKEN_START, startPos); - - if (tagStartPos == -1) { - break; - } - - int tagEndPos = str.indexOf(TOKEN_END, - tagStartPos + TOKEN_START.length()); - - if (tagEndPos == -1) { - break; - } - - String tag = str.substring(tagStartPos, - tagEndPos + TOKEN_END.length()); - String token = tag.substring(TOKEN_START.length(), - tag.lastIndexOf(TOKEN_END)); - String val = ""; - - if (token != null) { - if (token.equals(TOKEN_HOSTNAME)) { - val = getHostname(); - } else if (token.equals(TOKEN_APP_TYPE)) { - val = getApplicationType(); - } else if (token.equals(TOKEN_JVM_INSTANCE)) { - val = getJvmInstanceId(); - } else if (token.startsWith(TOKEN_PROPERTY)) { - String propertyName = token.substring(TOKEN_PROPERTY - .length()); - - val = getSystemProperty(propertyName); - } else if (token.startsWith(TOKEN_ENV)) { - String envName = token.substring(TOKEN_ENV.length()); - - val = getEnv(envName); - } else if (token.startsWith(TOKEN_TIME)) { - String dtFormat = token.substring(TOKEN_TIME.length()); - - val = getFormattedTime(time, dtFormat); - } - } - - if (val == null) { - val = ""; - } - - str = str.substring(0, tagStartPos) + val - + str.substring(tagEndPos + TOKEN_END.length()); - startPos = tagStartPos + val.length(); - } - - return str; - } - - public static String getHostname() { - String ret = local_hostname; - - if (ret == null) { - initLocalHost(); - - ret = local_hostname; - - if (ret == null) { - ret = "unknown"; - } - } - - return ret; - } - - public static void setApplicationType(String applicationType) { - sApplicationType = applicationType; - } - - public static String getApplicationType() { - return sApplicationType; - } - - public static String getJvmInstanceId() { - Integer val = Integer.valueOf(sJvmID.toString().hashCode()); - long longVal = val.longValue(); - String ret = Long.toString(Math.abs(longVal)); - - return ret; - } - - public static String getSystemProperty(String propertyName) { - String 
ret = null; - - try { - ret = propertyName != null ? System.getProperty(propertyName) - : null; - } catch (Exception excp) { - logger.warn("getSystemProperty(" + propertyName + ") failed", excp); - } - - return ret; - } - - public static String getEnv(String envName) { - String ret = null; - - try { - ret = envName != null ? System.getenv(envName) : null; - } catch (Exception excp) { - logger.warn("getenv(" + envName + ") failed", excp); - } - - return ret; - } - - public static String getFormattedTime(long time, String format) { - String ret = null; - - try { - SimpleDateFormat sdf = new SimpleDateFormat(format); - - ret = sdf.format(time); - } catch (Exception excp) { - logger.warn("SimpleDateFormat.format() failed: " + format, excp); - } - - return ret; - } - - public static void createParents(File file) { - if (file != null) { - String parentName = file.getParent(); + private static final Logger logger = LoggerFactory.getLogger(MiscUtil.class); + + public static final String TOKEN_START = "%"; + public static final String TOKEN_END = "%"; + public static final String TOKEN_HOSTNAME = "hostname"; + public static final String TOKEN_APP_TYPE = "app-type"; + public static final String TOKEN_JVM_INSTANCE = "jvm-instance"; + public static final String TOKEN_TIME = "time:"; + public static final String TOKEN_PROPERTY = "property:"; + public static final String TOKEN_ENV = "env:"; + public static final String ESCAPE_STR = "\\"; + public static final String LINE_SEPARATOR = System.lineSeparator(); + + private static String sApplicationType; + private static UserGroupInformation ugiLoginUser; + private static Subject subjectLoginUser; + private static String localHostname; + private static final Map logHistoryList = new Hashtable<>(); + private static final int logInterval = 30000; // 30 seconds + private static final VMID sJvmID = new VMID(); + private static final ThreadLocal MAPPER = ThreadLocal.withInitial(() -> { + ObjectMapper objectMapper = new ObjectMapper(); + 
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + + objectMapper.setDateFormat(dateFormat); + objectMapper.configure(FAIL_ON_UNKNOWN_PROPERTIES, false); + objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true); + + return objectMapper; + }); + + private MiscUtil() { + // to block instantiation + } + + public static ObjectMapper getMapper() { + return MAPPER.get(); + } + + public static String replaceTokens(String str, long time) { + if (str == null) { + return str; + } + + if (time <= 0) { + time = System.currentTimeMillis(); + } + + for (int startPos = 0; startPos < str.length(); ) { + int tagStartPos = str.indexOf(TOKEN_START, startPos); - if (parentName != null) { - File parentDir = new File(parentName); + if (tagStartPos == -1) { + break; + } - if (!parentDir.exists()) { - if (!parentDir.mkdirs()) { - logger.warn("createParents(): failed to create " - + parentDir.getAbsolutePath()); - } - } - } - } - } + int tagEndPos = str.indexOf(TOKEN_END, tagStartPos + TOKEN_START.length()); - public static long getNextRolloverTime(long lastRolloverTime, long interval) { - long now = System.currentTimeMillis() / 1000 * 1000; // round to second + if (tagEndPos == -1) { + break; + } - if (lastRolloverTime <= 0) { - // should this be set to the next multiple-of-the-interval from - // start of the day? 
- return now + interval; - } else if (lastRolloverTime <= now) { - long nextRolloverTime = now + interval; + String tag = str.substring(tagStartPos, tagEndPos + TOKEN_END.length()); + String token = tag.substring(TOKEN_START.length(), tag.lastIndexOf(TOKEN_END)); + String val = ""; - // keep it at 'interval' boundary - long trimInterval = (nextRolloverTime - lastRolloverTime) - % interval; + if (token.equals(TOKEN_HOSTNAME)) { + val = getHostname(); + } else if (token.equals(TOKEN_APP_TYPE)) { + val = getApplicationType(); + } else if (token.equals(TOKEN_JVM_INSTANCE)) { + val = getJvmInstanceId(); + } else if (token.startsWith(TOKEN_PROPERTY)) { + String propertyName = token.substring(TOKEN_PROPERTY.length()); - return nextRolloverTime - trimInterval; - } else { - return lastRolloverTime; - } - } - - public static long getRolloverStartTime(long nextRolloverTime, long interval) { - return (nextRolloverTime <= interval) ? System.currentTimeMillis() - : nextRolloverTime - interval; - } - - public static int parseInteger(String str, int defValue) { - int ret = defValue; - - if (str != null) { - try { - ret = Integer.parseInt(str); - } catch (Exception excp) { - // ignore - } - } - - return ret; - } - - public static String generateUniqueId() { - return UUID.randomUUID().toString(); - } - - // UUID.randomUUID() uses SecureRandom, which is seen to be slow in some environments; this method uses Random - public static String generateGuid() { - byte[] randomBytes = new byte[16]; - - RandomHolder.random.nextBytes(randomBytes); - - UUID uuid = UUID.nameUUIDFromBytes(randomBytes); - - return uuid.toString(); - } + val = getSystemProperty(propertyName); + } else if (token.startsWith(TOKEN_ENV)) { + String envName = token.substring(TOKEN_ENV.length()); - public static String stringify(T log) { - String ret = null; - - if (log != null) { - if (log instanceof String) { - ret = (String) log; + val = getEnv(envName); + } else if (token.startsWith(TOKEN_TIME)) { + String dtFormat = 
token.substring(TOKEN_TIME.length()); + + val = getFormattedTime(time, dtFormat); + } + + if (val == null) { + val = ""; + } + + str = str.substring(0, tagStartPos) + val + str.substring(tagEndPos + TOKEN_END.length()); + startPos = tagStartPos + val.length(); + } + + return str; + } + + public static String getHostname() { + String ret = localHostname; + + if (ret == null) { + initLocalHost(); + + ret = localHostname; + + if (ret == null) { + ret = "unknown"; + } + } + + return ret; + } + + public static String getApplicationType() { + return sApplicationType; + } + + public static void setApplicationType(String applicationType) { + sApplicationType = applicationType; + } + + public static String getJvmInstanceId() { + int val = sJvmID.toString().hashCode(); + + return Long.toString(Math.abs((long) val)); + } + + public static String getSystemProperty(String propertyName) { + String ret = null; + + try { + ret = propertyName != null ? System.getProperty(propertyName) : null; + } catch (Exception excp) { + logger.warn("getSystemProperty({}) failed", propertyName, excp); + } + + return ret; + } + + public static String getEnv(String envName) { + String ret = null; + + try { + ret = envName != null ? 
System.getenv(envName) : null; + } catch (Exception excp) { + logger.warn("getenv({}) failed", envName, excp); + } + + return ret; + } + + public static String getFormattedTime(long time, String format) { + String ret = null; + + try { + SimpleDateFormat sdf = new SimpleDateFormat(format); + + ret = sdf.format(time); + } catch (Exception excp) { + logger.warn("SimpleDateFormat.format() failed: {}", format, excp); + } + + return ret; + } + + public static void createParents(File file) { + if (file != null) { + String parentName = file.getParent(); + + if (parentName != null) { + File parentDir = new File(parentName); + + if (!parentDir.exists()) { + if (!parentDir.mkdirs()) { + logger.warn("createParents(): failed to create {}", parentDir.getAbsolutePath()); + } + } + } + } + } + + public static long getNextRolloverTime(long lastRolloverTime, long interval) { + long now = System.currentTimeMillis() / 1000 * 1000; // round to second + + if (lastRolloverTime <= 0) { + // should this be set to the next multiple-of-the-interval from + // start of the day? + return now + interval; + } else if (lastRolloverTime <= now) { + long nextRolloverTime = now + interval; + + // keep it at 'interval' boundary + long trimInterval = (nextRolloverTime - lastRolloverTime) % interval; + + return nextRolloverTime - trimInterval; + } else { + return lastRolloverTime; + } + } + + public static long getRolloverStartTime(long nextRolloverTime, long interval) { + return (nextRolloverTime <= interval) ? 
System.currentTimeMillis() : nextRolloverTime - interval; + } + + public static int parseInteger(String str, int defValue) { + int ret = defValue; + + if (str != null) { + try { + ret = Integer.parseInt(str); + } catch (Exception excp) { + // ignore + } + } + + return ret; + } + + public static String generateUniqueId() { + return UUID.randomUUID().toString(); + } + + // UUID.randomUUID() uses SecureRandom, which is seen to be slow in some environments; this method uses Random + public static String generateGuid() { + byte[] randomBytes = new byte[16]; + + RandomHolder.random.nextBytes(randomBytes); + + UUID uuid = UUID.nameUUIDFromBytes(randomBytes); + + return uuid.toString(); + } + + public static String stringify(T log) { + String ret = null; + + if (log != null) { + if (log instanceof String) { + ret = (String) log; } else if (getMapper() != null) { try { ret = getMapper().writeValueAsString(log); } catch (Exception e) { - logger.error("Error occurred while processing JSOn object " + log, e); + logger.error("Error occurred while processing JSOn object {}", log, e); + ret = log.toString(); // Fallback to default toString() method } - } else { - ret = log.toString(); - } - } + } else { + ret = log.toString(); + } + } - return ret; - } + return ret; + } - static public T fromJson(String jsonStr, Class clazz) { + public static T fromJson(String jsonStr, Class clazz) { try { return getMapper().readValue(jsonStr, clazz); } catch (Exception exception) { - logger.error("Error occurred while processing JSOn object " + jsonStr, exception); + logger.error("Error occurred while processing JSOn object {}", jsonStr, exception); + } + + return null; + } + + public static String getStringProperty(Properties props, String propName) { + String ret = null; + + if (props != null && propName != null) { + String val = props.getProperty(propName); + + if (val != null) { + ret = val; + } + } + + return ret; + } + + public static String getStringProperty(Properties props, String 
propName, String defValue) { + String ret = defValue; + + if (props != null && propName != null) { + String val = props.getProperty(propName); + + if (val != null) { + ret = val; + } + } + + return ret; + } + + public static boolean getBooleanProperty(Properties props, String propName, boolean defValue) { + boolean ret = defValue; + + if (props != null && propName != null) { + String val = props.getProperty(propName); + + if (val != null) { + ret = Boolean.parseBoolean(val); + } + } + + return ret; + } + + public static int getIntProperty(Properties props, String propName, int defValue) { + int ret = defValue; + + if (props != null && propName != null) { + String val = props.getProperty(propName); + + if (val != null) { + try { + ret = Integer.parseInt(val); + } catch (NumberFormatException excp) { + // ignore + } + } + } + + return ret; + } + + public static long getLongProperty(Properties props, String propName, long defValue) { + long ret = defValue; + + if (props != null && propName != null) { + String val = props.getProperty(propName); + + if (val != null) { + try { + ret = Long.parseLong(val); + } catch (NumberFormatException excp) { + // ignore + } + } + } + + return ret; + } + + public static Map getPropertiesWithPrefix(Properties props, String prefix) { + Map prefixedProperties = new HashMap<>(); + + if (props != null && prefix != null) { + for (String key : props.stringPropertyNames()) { + if (key == null) { + continue; + } + + String val = props.getProperty(key); + + if (key.startsWith(prefix)) { + key = key.substring(prefix.length()); + + prefixedProperties.put(key, val); + } + } + } + + return prefixedProperties; + } + + /** + * @param destListStr + * @param delim + * @return + */ + public static List toArray(String destListStr, String delim) { + List list = new ArrayList<>(); + + if (destListStr != null && !destListStr.isEmpty()) { + StringTokenizer tokenizer = new StringTokenizer(destListStr, delim.trim()); + + while (tokenizer.hasMoreTokens()) { + 
list.add(tokenizer.nextToken()); + } } + + return list; + } + + public static String getCredentialString(String url, String alias) { + if (url != null && alias != null) { + return RangerCredentialProvider.getInstance().getCredentialString(url, alias); + } + return null; - } - - public static String getStringProperty(Properties props, String propName) { - String ret = null; - - if (props != null && propName != null) { - String val = props.getProperty(propName); - if (val != null) { - ret = val; - } - } - - return ret; - } - - public static String getStringProperty(Properties props, String propName, String defValue) { - String ret = defValue; - - if (props != null && propName != null) { - String val = props.getProperty(propName); - if (val != null) { - ret = val; - } - } - - return ret; - } - - public static boolean getBooleanProperty(Properties props, String propName, - boolean defValue) { - boolean ret = defValue; - - if (props != null && propName != null) { - String val = props.getProperty(propName); - - if (val != null) { - ret = Boolean.valueOf(val); - } - } - - return ret; - } - - public static int getIntProperty(Properties props, String propName, - int defValue) { - int ret = defValue; - - if (props != null && propName != null) { - String val = props.getProperty(propName); - if (val != null) { - try { - ret = Integer.parseInt(val); - } catch (NumberFormatException excp) { - ret = defValue; - } - } - } - - return ret; - } - - public static long getLongProperty(Properties props, String propName, - long defValue) { - long ret = defValue; - - if (props != null && propName != null) { - String val = props.getProperty(propName); - if (val != null) { - try { - ret = Long.parseLong(val); - } catch (NumberFormatException excp) { - ret = defValue; - } - } - } - - return ret; - } - - public static Map getPropertiesWithPrefix(Properties props, - String prefix) { - Map prefixedProperties = new HashMap(); - - if (props != null && prefix != null) { - for (String key : 
props.stringPropertyNames()) { - if (key == null) { - continue; - } - - String val = props.getProperty(key); - - if (key.startsWith(prefix)) { - key = key.substring(prefix.length()); - - if (key == null) { - continue; - } - - prefixedProperties.put(key, val); - } - } - } - - return prefixedProperties; - } - - /** - * @param destListStr - * @param delim - * @return - */ - public static List toArray(String destListStr, String delim) { - List list = new ArrayList(); - if (destListStr != null && !destListStr.isEmpty()) { - StringTokenizer tokenizer = new StringTokenizer(destListStr, - delim.trim()); - while (tokenizer.hasMoreTokens()) { - list.add(tokenizer.nextToken()); - } - } - return list; - } - - public static String getCredentialString(String url, String alias) { - if (url != null && alias != null) { - return RangerCredentialProvider.getInstance() - .getCredentialString(url, alias); - } - return null; - } - - public static UserGroupInformation createUGIFromSubject(Subject subject) - throws IOException { - logger.info("SUBJECT " + (subject == null ? "not found" : "found")); - UserGroupInformation ugi = null; - if (subject != null) { - logger.info("SUBJECT.PRINCIPALS.size()=" - + subject.getPrincipals().size()); - Set principals = subject.getPrincipals(); - for (Principal principal : principals) { - logger.info("SUBJECT.PRINCIPAL.NAME=" + principal.getName()); - } - try { - // Do not remove the below statement. The default - // getLoginUser does some initialization which is needed - // for getUGIFromSubject() to work. 
- UserGroupInformation.getLoginUser(); - logger.info("Default UGI before using new Subject:" - + UserGroupInformation.getLoginUser()); - } catch (Throwable t) { - logger.error("failed to get login user", t); - } - ugi = UserGroupInformation.getUGIFromSubject(subject); - logger.info("SUBJECT.UGI.NAME=" + ugi.getUserName() + ", ugi=" - + ugi); - } else { - logger.info("Server username is not available"); - } - return ugi; - } - - /** - * @param newUGI - * @param newSubject - */ - public static void setUGILoginUser(UserGroupInformation newUGI, - Subject newSubject) { - if (newUGI != null) { - UserGroupInformation.setLoginUser(newUGI); - ugiLoginUser = newUGI; - logger.info("Setting UGI=" + newUGI); - } else { - logger.error("UGI is null. Not setting it."); - } - if (newSubject != null) { - logger.info("Setting SUBJECT"); - subjectLoginUser = newSubject; - } - } - - public static UserGroupInformation getUGILoginUser() { - UserGroupInformation ret = ugiLoginUser; - - if (ret == null) { - try { - // Do not cache ugiLoginUser if it is not explicitly set with - // setUGILoginUser. - // It appears that the user represented by - // the returned object is periodically logged out and logged back - // in when the token is scheduled to expire. So it is better - // to get the user object every time from UserGroupInformation class and - // not cache it - ret = getLoginUser(); - } catch (IOException e) { - logger.error("Error getting UGI.", e); - } - } - - if(ret != null) { - try { - ret.checkTGTAndReloginFromKeytab(); - } catch(IOException ioe) { - logger.error("Error renewing TGT and relogin. 
Ignoring Exception, and continuing with the old TGT", ioe); - } - } - - return ret; - } - - /** - * Execute the {@link PrivilegedExceptionAction} on the {@link UserGroupInformation} if it's set, otherwise call it directly - */ - public static X executePrivilegedAction(final PrivilegedExceptionAction action) throws Exception { - final UserGroupInformation ugi = getUGILoginUser(); - if (ugi != null) { - return ugi.doAs(action); - } else { - return action.run(); - } - } - - /** - * Execute the {@link PrivilegedAction} on the {@link UserGroupInformation} if it's set, otherwise call it directly. - */ - public static X executePrivilegedAction(final PrivilegedAction action) { - final UserGroupInformation ugi = getUGILoginUser(); - if (ugi != null) { - return ugi.doAs(action); - } else { - return action.run(); - } - } - - public static Subject getSubjectLoginUser() { - return subjectLoginUser; - } - - public static String getKerberosNamesRules() { - return KerberosName.getRules(); - } - /** - * - * @param principal - * This could be in the format abc/host@domain.com - * @return - */ - static public String getShortNameFromPrincipalName(String principal) { - if (principal == null) { - return null; - } - try { - // Assuming it is kerberos name for now - KerberosName kerbrosName = new KerberosName(principal); - String userName = kerbrosName.getShortName(); - userName = StringUtils.substringBefore(userName, "/"); - userName = StringUtils.substringBefore(userName, "@"); - return userName; - } catch (Throwable t) { - logger.error("Error converting kerberos name. 
principal=" - + principal + ", KerberosName.rules=" + KerberosName.getRules()); - } - return principal; - } - - /** - * @param userName - * @return - */ - static public Set getGroupsForRequestUser(String userName) { - if (userName != null) { - try { - UserGroupInformation ugi = UserGroupInformation - .createRemoteUser(userName); - String[] groups = ugi.getGroupNames(); - if (groups != null && groups.length > 0) { - Set groupsSet = new java.util.HashSet(); - for (String group : groups) { - groupsSet.add(group); - } - return groupsSet; - } - } catch (Throwable e) { - logErrorMessageByInterval(logger, - "Error getting groups for users. userName=" + userName, e); - } - } - return Collections.emptySet(); - } - - static public boolean logErrorMessageByInterval(Logger useLogger, - String message) { - return logErrorMessageByInterval(useLogger, message, null); - } - - /** - * @param useLogger - * @param message - * @param e - */ - static public boolean logErrorMessageByInterval(Logger useLogger, - String message, Throwable e) { + } + + public static UserGroupInformation createUGIFromSubject(Subject subject) throws IOException { + logger.info("SUBJECT {}", (subject == null ? "not found" : "found")); + + UserGroupInformation ugi = null; + + if (subject != null) { + logger.info("SUBJECT.PRINCIPALS.size()={}", subject.getPrincipals().size()); + + Set principals = subject.getPrincipals(); + + for (Principal principal : principals) { + logger.info("SUBJECT.PRINCIPAL.NAME={}", principal.getName()); + } + + try { + // Do not remove the below statement. The default + // getLoginUser does some initialization which is needed + // for getUGIFromSubject() to work. 
+ UserGroupInformation.getLoginUser(); + + logger.info("Default UGI before using new Subject:{}", UserGroupInformation.getLoginUser()); + } catch (Throwable t) { + logger.error("failed to get login user", t); + } + + ugi = UserGroupInformation.getUGIFromSubject(subject); + + logger.info("SUBJECT.UGI.NAME={}, ugi={}", ugi.getUserName(), ugi); + } else { + logger.info("Server username is not available"); + } + + return ugi; + } + + /** + * @param newUGI + * @param newSubject + */ + public static void setUGILoginUser(UserGroupInformation newUGI, Subject newSubject) { + if (newUGI != null) { + UserGroupInformation.setLoginUser(newUGI); + + ugiLoginUser = newUGI; + + logger.info("Setting UGI={}", newUGI); + } else { + logger.error("UGI is null. Not setting it."); + } + + if (newSubject != null) { + logger.info("Setting SUBJECT"); + + subjectLoginUser = newSubject; + } + } + + public static UserGroupInformation getUGILoginUser() { + UserGroupInformation ret = ugiLoginUser; + + if (ret == null) { + try { + // Do not cache ugiLoginUser if it is not explicitly set with + // setUGILoginUser. + // It appears that the user represented by + // the returned object is periodically logged out and logged back + // in when the token is scheduled to expire. So it is better + // to get the user object every time from UserGroupInformation class and + // not cache it + ret = getLoginUser(); + } catch (IOException e) { + logger.error("Error getting UGI.", e); + } + } + + if (ret != null) { + try { + ret.checkTGTAndReloginFromKeytab(); + } catch (IOException ioe) { + logger.error("Error renewing TGT and relogin. 
Ignoring Exception, and continuing with the old TGT", ioe); + } + } + + return ret; + } + + /** + * Execute the {@link PrivilegedExceptionAction} on the {@link UserGroupInformation} if it's set, otherwise call it directly + */ + public static X executePrivilegedAction(final PrivilegedExceptionAction action) throws Exception { + final UserGroupInformation ugi = getUGILoginUser(); + + if (ugi != null) { + return ugi.doAs(action); + } else { + return action.run(); + } + } + + /** + * Execute the {@link PrivilegedAction} on the {@link UserGroupInformation} if it's set, otherwise call it directly. + */ + public static X executePrivilegedAction(final PrivilegedAction action) { + final UserGroupInformation ugi = getUGILoginUser(); + + if (ugi != null) { + return ugi.doAs(action); + } else { + return action.run(); + } + } + + public static Subject getSubjectLoginUser() { + return subjectLoginUser; + } + + public static String getKerberosNamesRules() { + return KerberosName.getRules(); + } + + /** + * @param principal This could be in the format abc/host@domain.com + * @return + */ + public static String getShortNameFromPrincipalName(String principal) { + if (principal == null) { + return null; + } + + try { + // Assuming it is kerberos name for now + KerberosName kerbrosName = new KerberosName(principal); + String userName = kerbrosName.getShortName(); + + userName = StringUtils.substringBefore(userName, "/"); + userName = StringUtils.substringBefore(userName, "@"); + + return userName; + } catch (Throwable t) { + logger.error("Error converting kerberos name. 
principal={}, KerberosName.rules={}", principal, KerberosName.getRules()); + } + + return principal; + } + + /** + * @param userName + * @return + */ + public static Set getGroupsForRequestUser(String userName) { + if (userName != null) { + try { + UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userName); + String[] groups = ugi.getGroupNames(); + + if (groups != null && groups.length > 0) { + return new HashSet<>(Arrays.asList(groups)); + } + } catch (Throwable e) { + logErrorMessageByInterval(logger, "Error getting groups for users. userName=" + userName, e); + } + } + + return Collections.emptySet(); + } + + public static boolean logErrorMessageByInterval(Logger useLogger, String message) { + return logErrorMessageByInterval(useLogger, message, null); + } + + /** + * @param useLogger + * @param message + * @param e + */ + public static boolean logErrorMessageByInterval(Logger useLogger, String message, Throwable e) { if (message == null) { return false; } - LogHistory log = logHistoryList.get(message); - if (log == null) { - log = new LogHistory(); - logHistoryList.put(message, log); - } - if ((System.currentTimeMillis() - log.lastLogTime) > logInterval) { - log.lastLogTime = System.currentTimeMillis(); - int counter = log.counter; - log.counter = 0; - if (counter > 0) { - message += ". 
Messages suppressed before: " + counter; - } - if (e == null) { - useLogger.error(message); - } else { - useLogger.error(message, e); - } - - return true; - } else { - log.counter++; - } - return false; - - } - - public static void setUGIFromJAASConfig(String jaasConfigAppName) throws Exception { - String keytabFile = null; - String principal = null; - UserGroupInformation ugi = null; - if (logger.isDebugEnabled()){ - logger.debug("===> MiscUtil.setUGIFromJAASConfig() jaasConfigAppName: " + jaasConfigAppName); - } - try { - AppConfigurationEntry entries[] = Configuration.getConfiguration().getAppConfigurationEntry(jaasConfigAppName); - if(!ArrayUtils.isEmpty(entries)) { - for (AppConfigurationEntry entry : entries) { - if (entry.getOptions().get("keyTab") != null) { - keytabFile = (String) entry.getOptions().get("keyTab"); - } - if (entry.getOptions().get("principal") != null) { - principal = (String) entry.getOptions().get("principal"); - } - if (!StringUtils.isEmpty(principal) && !StringUtils.isEmpty(keytabFile)) { - break; - } - } - if (!StringUtils.isEmpty(principal) && !StringUtils.isEmpty(keytabFile)) { - // This will login and set the UGI - UserGroupInformation.loginUserFromKeytab(principal, keytabFile); - ugi = UserGroupInformation.getLoginUser(); - } else { - String error_mesage = "Unable to get the principal/keytab from jaasConfigAppName: " + jaasConfigAppName; - logger.error(error_mesage); - throw new Exception(error_mesage); - } - logger.info("MiscUtil.setUGIFromJAASConfig() UGI: " + ugi + " principal: " + principal + " keytab: " + keytabFile); - } else { - logger.warn("JAASConfig file not found! 
Ranger Plugin will not working in a Secure Cluster..."); - } - } catch ( Exception e) { - logger.error("Unable to set UGI for Principal: " + principal + " keytab: " + keytabFile ); - throw e; - } - if (logger.isDebugEnabled()) { - logger.debug("<=== MiscUtil.setUGIFromJAASConfig() jaasConfigAppName: " + jaasConfigAppName + " UGI: " + ugi + " principal: " + principal + " keytab: " + keytabFile); - } - } - - public static void authWithKerberos(String keytab, String principal, - String nameRules) { - - if (keytab == null || principal == null) { - return; - } - Subject serverSubject = new Subject(); - int successLoginCount = 0; - String[] spnegoPrincipals = null; - - try { - if (principal.equals("*")) { - spnegoPrincipals = KerberosUtil.getPrincipalNames(keytab, - Pattern.compile("HTTP/.*")); - if (spnegoPrincipals.length == 0) { - logger.error("No principals found in keytab=" + keytab); - } - } else { - spnegoPrincipals = new String[] { principal }; - } - - if (nameRules != null) { - KerberosName.setRules(nameRules); - } - - boolean useKeytab = true; - if (!useKeytab) { - logger.info("Creating UGI with subject"); - LoginContext loginContext = null; - List loginContexts = new ArrayList(); - for (String spnegoPrincipal : spnegoPrincipals) { - try { - logger.info("Login using keytab " + keytab - + ", for principal " + spnegoPrincipal); - final KerberosConfiguration kerberosConfiguration = new KerberosConfiguration( - keytab, spnegoPrincipal); - loginContext = new LoginContext("", - serverSubject, null, kerberosConfiguration); - loginContext.login(); - successLoginCount++; - logger.info("Login success keytab " + keytab - + ", for principal " + spnegoPrincipal); - loginContexts.add(loginContext); - } catch (Throwable t) { - logger.error("Login failed keytab " + keytab - + ", for principal " + spnegoPrincipal, t); - } - if (successLoginCount > 0) { - logger.info("Total login success count=" - + successLoginCount); - try { - UserGroupInformation - 
.loginUserFromSubject(serverSubject); - // UserGroupInformation ugi = - // createUGIFromSubject(serverSubject); - // if (ugi != null) { - // setUGILoginUser(ugi, serverSubject); - // } - } catch (Throwable e) { - logger.error("Error creating UGI from subject. subject=" - + serverSubject); - } finally { - if (loginContext != null) { - loginContext.logout(); - } - } - } else { - logger.error("Total logins were successfull from keytab=" - + keytab + ", principal=" + principal); - } - } - } else { - logger.info("Creating UGI from keytab directly. keytab=" - + keytab + ", principal=" + spnegoPrincipals[0]); - UserGroupInformation ugi = UserGroupInformation - .loginUserFromKeytabAndReturnUGI(spnegoPrincipals[0], - keytab); - MiscUtil.setUGILoginUser(ugi, null); - } - - } catch (Throwable t) { - logger.error("Failed to login with given keytab and principal", t); - } - - } - - public static void loginWithKeyTab(String keytab, String principal, String nameRules) { - if (logger.isDebugEnabled()) { - logger.debug("==> MiscUtil.loginWithKeyTab() keytab= " + keytab + "principal= " + principal + "nameRules= " + nameRules); - } - - if (keytab == null || principal == null) { - logger.error("Failed to login as keytab or principal is null!"); - return; - } - - String[] spnegoPrincipals; - UserGroupInformation ugi; - - try { - if (principal.equals("*")) { - spnegoPrincipals = KerberosUtil.getPrincipalNames(keytab, Pattern.compile("HTTP/.*")); - if (spnegoPrincipals.length == 0) { - logger.error("No principals found in keytab= " + keytab); - } - } else { - spnegoPrincipals = new String[] { principal }; - } - - if (nameRules != null) { - KerberosName.setRules(nameRules); - } - - logger.info("Creating UGI from keytab directly. 
keytab= " + keytab + ", principal= " + spnegoPrincipals[0]); - ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(spnegoPrincipals[0], keytab); - MiscUtil.setUGILoginUser(ugi, null); - } catch (Exception e) { - logger.error("Failed to login with given keytab= " + keytab + "principal= " + principal + "nameRules= " + nameRules, e); - } - - if (logger.isDebugEnabled()) { - logger.debug("<== MiscUtil.loginWithKeyTab()"); - } - } - - static class LogHistory { - long lastLogTime = 0; - int counter = 0; - } - - /** - * Kerberos context configuration for the JDK GSS library. - */ - private static class KerberosConfiguration extends Configuration { - private String keytab; - private String principal; - - public KerberosConfiguration(String keytab, String principal) { - this.keytab = keytab; - this.principal = principal; - } - - @Override - public AppConfigurationEntry[] getAppConfigurationEntry(String name) { - Map options = new HashMap(); - if (IBM_JAVA) { - options.put("useKeytab", keytab.startsWith("file://") ? keytab - : "file://" + keytab); - options.put("principal", principal); - options.put("credsType", "acceptor"); - } else { - options.put("keyTab", keytab); - options.put("principal", principal); - options.put("useKeyTab", "true"); - options.put("storeKey", "true"); - options.put("doNotPrompt", "true"); - options.put("useTicketCache", "true"); - options.put("renewTGT", "true"); - options.put("isInitiator", "false"); - } - options.put("refreshKrb5Config", "true"); - String ticketCache = System.getenv("KRB5CCNAME"); - if (ticketCache != null) { - if (IBM_JAVA) { - options.put("useDefaultCcache", "true"); - // The first value searched when "useDefaultCcache" is used. 
- System.setProperty("KRB5CCNAME", ticketCache); - options.put("renewTGT", "true"); - options.put("credsType", "both"); - } else { - options.put("ticketCache", ticketCache); - } - } - if (logger.isDebugEnabled()) { - options.put("debug", "true"); - } - - return new AppConfigurationEntry[] { new AppConfigurationEntry( - KerberosUtil.getKrb5LoginModuleName(), - AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, - options), }; - } - } - - public static UserGroupInformation getLoginUser() throws IOException { - return UserGroupInformation.getLoginUser(); - } - - private static void initLocalHost() { - if ( logger.isDebugEnabled() ) { - logger.debug("==> MiscUtil.initLocalHost()"); - } - - try { - local_hostname = InetAddress.getLocalHost().getHostName(); - } catch (Throwable excp) { - logger.warn("getHostname()", excp); - } - if ( logger.isDebugEnabled() ) { - logger.debug("<== MiscUtil.initLocalHost()"); - } - } - public static Date getUTCDateForLocalDate(Date date) { - TimeZone gmtTimeZone = TimeZone.getTimeZone("GMT+0"); - Calendar local = Calendar.getInstance(); - int offset = local.getTimeZone().getOffset(local.getTimeInMillis()); - GregorianCalendar utc = new GregorianCalendar(gmtTimeZone); - utc.setTimeInMillis(date.getTime()); - utc.add(Calendar.MILLISECOND, -offset); - return utc.getTime(); - } - public static Date getUTCDate() { - TimeZone gmtTimeZone = TimeZone.getTimeZone("GMT+0"); - Calendar local = Calendar.getInstance(); - int offset = local.getTimeZone().getOffset(local.getTimeInMillis()); - GregorianCalendar utc = new GregorianCalendar(gmtTimeZone); - utc.setTimeInMillis(local.getTimeInMillis()); - utc.add(Calendar.MILLISECOND, -offset); - return utc.getTime(); - } - - // use Holder class to defer initialization until needed - private static class RandomHolder { - static final Random random = new Random(); - } - - // Utility methods - public static int toInt(Object value) { - if (value == null) { - return 0; - } - if (value instanceof Integer) { - 
return (Integer) value; - } - if (value.toString().isEmpty()) { - return 0; - } - try { - return Integer.valueOf(value.toString()); - } catch (Throwable t) { - logger.error("Error converting value to integer. Value = " + value, t); - } - return 0; - } - - public static long toLong(Object value) { - if (value == null) { - return 0; - } - if (value instanceof Long) { - return (Long) value; - } - if (value.toString().isEmpty()) { - return 0; - } - try { - return Long.valueOf(value.toString()); - } catch (Throwable t) { - logger.error("Error converting value to long. Value = " + value, t); - } - return 0; - } - - public static Date toDate(Object value) { - if (value == null) { - return null; - } - if (value instanceof Date) { - return (Date) value; - } - try { - // TODO: Do proper parsing based on Solr response value - return new Date(value.toString()); - } catch (Throwable t) { - logger.error("Error converting value to date. Value = " + value, t); - } - return null; - } - - public static Date toLocalDate(Object value) { - if (value == null) { - return null; - } - if (value instanceof Date) { - return (Date) value; - } - try { - LocalDateTime localDateTime = LocalDateTime.parse(value.toString(), DateTimeFormatter.ISO_DATE_TIME); - return Date.from(localDateTime.atZone(ZoneId.systemDefault()).toInstant()); - } catch (Throwable t) { - logger.error("Error converting value to date. Value = " + value, t); - } - return null; - } + LogHistory log = logHistoryList.computeIfAbsent(message, k -> new LogHistory()); + + if ((System.currentTimeMillis() - log.lastLogTime) > logInterval) { + log.lastLogTime = System.currentTimeMillis(); + + int counter = log.counter; + + log.counter = 0; + + if (counter > 0) { + message += ". 
Messages suppressed before: " + counter; + } + + if (e == null) { + useLogger.error(message); + } else { + useLogger.error(message, e); + } + + return true; + } else { + log.counter++; + } + + return false; + } + + public static void setUGIFromJAASConfig(String jaasConfigAppName) throws Exception { + String keytabFile = null; + String principal = null; + UserGroupInformation ugi = null; + + logger.debug("===> MiscUtil.setUGIFromJAASConfig() jaasConfigAppName: {}", jaasConfigAppName); + + try { + AppConfigurationEntry[] entries = Configuration.getConfiguration().getAppConfigurationEntry(jaasConfigAppName); + + if (!ArrayUtils.isEmpty(entries)) { + for (AppConfigurationEntry entry : entries) { + if (entry.getOptions().get("keyTab") != null) { + keytabFile = (String) entry.getOptions().get("keyTab"); + } + + if (entry.getOptions().get("principal") != null) { + principal = (String) entry.getOptions().get("principal"); + } + + if (!StringUtils.isEmpty(principal) && !StringUtils.isEmpty(keytabFile)) { + break; + } + } + + if (!StringUtils.isEmpty(principal) && !StringUtils.isEmpty(keytabFile)) { + // This will login and set the UGI + UserGroupInformation.loginUserFromKeytab(principal, keytabFile); + + ugi = UserGroupInformation.getLoginUser(); + } else { + String errorMesage = "Unable to get the principal/keytab from jaasConfigAppName: " + jaasConfigAppName; + + logger.error(errorMesage); + + throw new Exception(errorMesage); + } + + logger.info("MiscUtil.setUGIFromJAASConfig() UGI: {} principal: {} keytab: {}", ugi, principal, keytabFile); + } else { + logger.warn("JAASConfig file not found! 
Ranger Plugin will not working in a Secure Cluster..."); + } + } catch (Exception e) { + logger.error("Unable to set UGI for Principal: {} keytab: {}", principal, keytabFile); + + throw e; + } + + logger.debug("<=== MiscUtil.setUGIFromJAASConfig() jaasConfigAppName: {} UGI: {} principal: {} keytab: {}", jaasConfigAppName, ugi, principal, keytabFile); + } + + public static void authWithKerberos(String keytab, String principal, String nameRules) { + if (keytab == null || principal == null) { + return; + } + + Subject serverSubject = new Subject(); + int successLoginCount = 0; + String[] spnegoPrincipals; + + try { + if (principal.equals("*")) { + spnegoPrincipals = KerberosUtil.getPrincipalNames(keytab, Pattern.compile("HTTP/.*")); + + if (spnegoPrincipals.length == 0) { + logger.error("No principals found in keytab={}", keytab); + } + } else { + spnegoPrincipals = new String[] {principal}; + } + + if (nameRules != null) { + KerberosName.setRules(nameRules); + } + + boolean useKeytab = true; + + if (!useKeytab) { + logger.info("Creating UGI with subject"); + + LoginContext loginContext = null; + List loginContexts = new ArrayList<>(); + + for (String spnegoPrincipal : spnegoPrincipals) { + try { + logger.info("Login using keytab {}, for principal {}", keytab, spnegoPrincipal); + + final KerberosConfiguration kerberosConfiguration = new KerberosConfiguration(keytab, spnegoPrincipal); + + loginContext = new LoginContext("", serverSubject, null, kerberosConfiguration); + + loginContext.login(); + successLoginCount++; + + logger.info("Login success keytab {}, for principal {}", keytab, spnegoPrincipal); + + loginContexts.add(loginContext); + } catch (Throwable t) { + logger.error("Login failed keytab {}, for principal {}", keytab, spnegoPrincipal, t); + } + + if (successLoginCount > 0) { + logger.info("Total login success count={}", successLoginCount); + + try { + UserGroupInformation.loginUserFromSubject(serverSubject); + // UserGroupInformation ugi = 
createUGIFromSubject(serverSubject); + // if (ugi != null) { + // setUGILoginUser(ugi, serverSubject); + // } + } catch (Throwable e) { + logger.error("Error creating UGI from subject. subject={}", serverSubject); + } finally { + if (loginContext != null) { + loginContext.logout(); + } + } + } else { + logger.error("Total logins were successfull from keytab={}, principal={}", keytab, principal); + } + } + } else { + logger.info("Creating UGI from keytab directly. keytab={}, principal={}", keytab, spnegoPrincipals[0]); + + UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(spnegoPrincipals[0], keytab); + + MiscUtil.setUGILoginUser(ugi, null); + } + } catch (Throwable t) { + logger.error("Failed to login with given keytab and principal", t); + } + } + + public static void loginWithKeyTab(String keytab, String principal, String nameRules) { + logger.debug("==> MiscUtil.loginWithKeyTab() keytab={} principal={} nameRules={}}", keytab, principal, nameRules); + + if (keytab == null || principal == null) { + logger.error("Failed to login as keytab or principal is null!"); + + return; + } + + String[] spnegoPrincipals; + UserGroupInformation ugi; + + try { + if (principal.equals("*")) { + spnegoPrincipals = KerberosUtil.getPrincipalNames(keytab, Pattern.compile("HTTP/.*")); + + if (spnegoPrincipals.length == 0) { + logger.error("No principals found in keytab= {}", keytab); + } + } else { + spnegoPrincipals = new String[] {principal}; + } + + if (nameRules != null) { + KerberosName.setRules(nameRules); + } + + logger.info("Creating UGI from keytab directly. 
keytab={}, principal={}", keytab, spnegoPrincipals[0]); + + ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(spnegoPrincipals[0], keytab); + + MiscUtil.setUGILoginUser(ugi, null); + } catch (Exception e) { + logger.error("Failed to login with given keytab={} principal={} nameRules={}", keytab, principal, nameRules, e); + } + + logger.debug("<== MiscUtil.loginWithKeyTab()"); + } + + public static UserGroupInformation getLoginUser() throws IOException { + return UserGroupInformation.getLoginUser(); + } + + public static Date getUTCDateForLocalDate(Date date) { + TimeZone gmtTimeZone = TimeZone.getTimeZone("GMT+0"); + Calendar local = Calendar.getInstance(); + int offset = local.getTimeZone().getOffset(local.getTimeInMillis()); + GregorianCalendar utc = new GregorianCalendar(gmtTimeZone); + + utc.setTimeInMillis(date.getTime()); + utc.add(Calendar.MILLISECOND, -offset); + + return utc.getTime(); + } + + public static Date getUTCDate() { + TimeZone gmtTimeZone = TimeZone.getTimeZone("GMT+0"); + Calendar local = Calendar.getInstance(); + int offset = local.getTimeZone().getOffset(local.getTimeInMillis()); + GregorianCalendar utc = new GregorianCalendar(gmtTimeZone); + + utc.setTimeInMillis(local.getTimeInMillis()); + utc.add(Calendar.MILLISECOND, -offset); + + return utc.getTime(); + } + + // Utility methods + public static int toInt(Object value) { + if (value == null) { + return 0; + } + + if (value instanceof Integer) { + return (Integer) value; + } + + if (value.toString().isEmpty()) { + return 0; + } + + try { + return Integer.parseInt(value.toString()); + } catch (Throwable t) { + logger.error("Error converting value to integer. 
Value={}", value, t); + } + + return 0; + } + + public static long toLong(Object value) { + if (value == null) { + return 0; + } + + if (value instanceof Long) { + return (Long) value; + } + + if (value.toString().isEmpty()) { + return 0; + } + + try { + return Long.parseLong(value.toString()); + } catch (Throwable t) { + logger.error("Error converting value to long. Value={}", value, t); + } + + return 0; + } + + public static Date toDate(Object value) { + if (value == null) { + return null; + } + + if (value instanceof Date) { + return (Date) value; + } + + try { + // TODO: Do proper parsing based on Solr response value + return new Date(value.toString()); + } catch (Throwable t) { + logger.error("Error converting value to date. Value={}", value, t); + } + + return null; + } + + public static Date toLocalDate(Object value) { + if (value == null) { + return null; + } + + if (value instanceof Date) { + return (Date) value; + } + + try { + LocalDateTime localDateTime = LocalDateTime.parse(value.toString(), DateTimeFormatter.ISO_DATE_TIME); + + return Date.from(localDateTime.atZone(ZoneId.systemDefault()).toInstant()); + } catch (Throwable t) { + logger.error("Error converting value to date. Value={}", value, t); + } + + return null; + } + + private static void initLocalHost() { + logger.debug("==> MiscUtil.initLocalHost()"); + + try { + localHostname = InetAddress.getLocalHost().getHostName(); + } catch (Throwable excp) { + logger.warn("getHostname()", excp); + } + + logger.debug("<== MiscUtil.initLocalHost()"); + } + + static class LogHistory { + long lastLogTime; + int counter; + } + + /** + * Kerberos context configuration for the JDK GSS library. 
+ */ + private static class KerberosConfiguration extends Configuration { + private final String keytab; + private final String principal; + + public KerberosConfiguration(String keytab, String principal) { + this.keytab = keytab; + this.principal = principal; + } + + @Override + public AppConfigurationEntry[] getAppConfigurationEntry(String name) { + Map options = new HashMap<>(); + + if (IBM_JAVA) { + options.put("useKeytab", keytab.startsWith("file://") ? keytab : "file://" + keytab); + options.put("principal", principal); + options.put("credsType", "acceptor"); + } else { + options.put("keyTab", keytab); + options.put("principal", principal); + options.put("useKeyTab", "true"); + options.put("storeKey", "true"); + options.put("doNotPrompt", "true"); + options.put("useTicketCache", "true"); + options.put("renewTGT", "true"); + options.put("isInitiator", "false"); + } + + options.put("refreshKrb5Config", "true"); + + String ticketCache = System.getenv("KRB5CCNAME"); + + if (ticketCache != null) { + if (IBM_JAVA) { + options.put("useDefaultCcache", "true"); + // The first value searched when "useDefaultCcache" is used. 
+ System.setProperty("KRB5CCNAME", ticketCache); + options.put("renewTGT", "true"); + options.put("credsType", "both"); + } else { + options.put("ticketCache", ticketCache); + } + } + + if (logger.isDebugEnabled()) { + options.put("debug", "true"); + } + + return new AppConfigurationEntry[] { + new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options), + }; + } + } + + // use Holder class to defer initialization until needed + private static class RandomHolder { + static final Random random = new Random(); + } + static { + initLocalHost(); + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java index 5ac8c0ee03..28df08521f 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,219 +17,212 @@ */ package org.apache.ranger.audit.provider; +import org.apache.ranger.audit.model.AuditEventBase; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.File; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Properties; -import org.apache.ranger.audit.model.AuditEventBase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class MultiDestAuditProvider extends BaseAuditHandler { - - private static final Logger LOG = LoggerFactory - .getLogger(MultiDestAuditProvider.class); - - protected List mProviders = new ArrayList(); - static final String DEFAULT_NAME = "multi_dest"; - - public MultiDestAuditProvider() { - LOG.info("MultiDestAuditProvider: creating.."); - setName(DEFAULT_NAME); - } - - public MultiDestAuditProvider(AuditHandler provider) { - LOG.info("MultiDestAuditProvider(): provider=" - + (provider == null ? 
null : provider.getName())); - setName(DEFAULT_NAME); - addAuditProvider(provider); - } - - @Override - public void init(Properties props) { - LOG.info("MultiDestAuditProvider.init()"); - - super.init(props); - - for (AuditHandler provider : mProviders) { - try { - provider.init(props); - } catch (Throwable excp) { - LOG.info("MultiDestAuditProvider.init(): failed " - + provider.getClass().getCanonicalName() + ")", excp); - } - } - } - - @Override - public void setParentPath(String parentPath) { - super.setParentPath(parentPath); - for (AuditHandler provider : mProviders) { - if (provider instanceof BaseAuditHandler) { - BaseAuditHandler baseAuditHander = (BaseAuditHandler) provider; - baseAuditHander.setParentPath(getName()); - } - } - } - - @Override - public void setName(String name) { - super.setName(name); - for (AuditHandler provider : mProviders) { - if (provider instanceof BaseAuditHandler) { - BaseAuditHandler baseAuditHander = (BaseAuditHandler) provider; - baseAuditHander.setParentPath(getName()); - } - } - } - - public void addAuditProvider(AuditHandler provider) { - if (provider != null) { - if(LOG.isDebugEnabled()) { - LOG.debug("MultiDestAuditProvider.addAuditProvider(providerType=" - + provider.getClass().getCanonicalName() + ")"); - } - - mProviders.add(provider); - if (provider instanceof BaseAuditHandler) { - BaseAuditHandler baseAuditHander = (BaseAuditHandler) provider; - baseAuditHander.setParentPath(getName()); - } - } - } - - public void addAuditProviders(List providers) { - if (providers != null) { - for (AuditHandler provider : providers) { - LOG.info("Adding " + provider.getName() - + " as consumer to MultiDestination " + getName()); - addAuditProvider(provider); - } - } - } - - @Override - public boolean log(AuditEventBase event) { - for (AuditHandler provider : mProviders) { - try { - provider.log(event); - } catch (Throwable excp) { - logFailedEvent(event, excp); - } - } - return true; - } - - @Override - public boolean log(Collection 
events) { - for (AuditHandler provider : mProviders) { - try { - provider.log(events); - } catch (Throwable excp) { - logFailedEvent(events, excp); - } - } - return true; - } - - @Override - public boolean logJSON(String event) { - for (AuditHandler provider : mProviders) { - try { - provider.logJSON(event); - } catch (Throwable excp) { - logFailedEventJSON(event, excp); - } - } - return true; - } - - @Override - public boolean logJSON(Collection events) { - for (AuditHandler provider : mProviders) { - try { - provider.logJSON(events); - } catch (Throwable excp) { - logFailedEventJSON(events, excp); - } - } - return true; - } - - - @Override - public boolean logFile(File file) { - for (AuditHandler provider : mProviders) { - try { - provider.logFile(file); - } catch (Throwable excp) { - logFailedEventJSON(file.getAbsolutePath(), excp); - } - } - return true; - } - - @Override - public void start() { - for (AuditHandler provider : mProviders) { - try { - provider.start(); - } catch (Throwable excp) { - LOG.error("MultiDestAuditProvider.start(): failed for provider { " - + provider.getClass().getName() + " }", excp); - } - } - } - - @Override - public void stop() { - for (AuditHandler provider : mProviders) { - try { - provider.stop(); - } catch (Throwable excp) { - LOG.error("MultiDestAuditProvider.stop(): failed for provider { " - + provider.getClass().getName() + " }", excp); - } - } - } - - @Override - public void waitToComplete() { - for (AuditHandler provider : mProviders) { - try { - provider.waitToComplete(); - } catch (Throwable excp) { - LOG.error( - "MultiDestAuditProvider.waitToComplete(): failed for provider { " - + provider.getClass().getName() + " }", excp); - } - } - } - - @Override - public void waitToComplete(long timeout) { - for (AuditHandler provider : mProviders) { - try { - provider.waitToComplete(timeout); - } catch (Throwable excp) { - LOG.error( - "MultiDestAuditProvider.waitToComplete(): failed for provider { " - + 
provider.getClass().getName() + " }", excp); - } - } - } - - @Override - public void flush() { - for (AuditHandler provider : mProviders) { - try { - provider.flush(); - } catch (Throwable excp) { - LOG.error("MultiDestAuditProvider.flush(): failed for provider { " - + provider.getClass().getName() + " }", excp); - } - } - } + private static final Logger LOG = LoggerFactory.getLogger(MultiDestAuditProvider.class); + + static final String DEFAULT_NAME = "multi_dest"; + + protected List mProviders = new ArrayList<>(); + + public MultiDestAuditProvider() { + LOG.info("MultiDestAuditProvider: creating.."); + + setName(DEFAULT_NAME); + } + + public MultiDestAuditProvider(AuditHandler provider) { + LOG.info("MultiDestAuditProvider(): provider={}", (provider == null ? null : provider.getName())); + + setName(DEFAULT_NAME); + addAuditProvider(provider); + } + + public void addAuditProvider(AuditHandler provider) { + if (provider != null) { + LOG.debug("MultiDestAuditProvider.addAuditProvider(providerType={})", provider.getClass().getCanonicalName()); + + mProviders.add(provider); + + if (provider instanceof BaseAuditHandler) { + ((BaseAuditHandler) provider).setParentPath(getName()); + } + } + } + + public void addAuditProviders(List providers) { + if (providers != null) { + for (AuditHandler provider : providers) { + LOG.info("Adding {} as consumer to MultiDestination {}", provider.getName(), getName()); + + addAuditProvider(provider); + } + } + } + + @Override + public boolean log(AuditEventBase event) { + for (AuditHandler provider : mProviders) { + try { + provider.log(event); + } catch (Throwable excp) { + logFailedEvent(event, excp); + } + } + + return true; + } + + @Override + public boolean logJSON(String event) { + for (AuditHandler provider : mProviders) { + try { + provider.logJSON(event); + } catch (Throwable excp) { + logFailedEventJSON(event, excp); + } + } + + return true; + } + + @Override + public boolean logJSON(Collection events) { + for (AuditHandler 
provider : mProviders) { + try { + provider.logJSON(events); + } catch (Throwable excp) { + logFailedEventJSON(events, excp); + } + } + + return true; + } + + @Override + public boolean logFile(File file) { + for (AuditHandler provider : mProviders) { + try { + provider.logFile(file); + } catch (Throwable excp) { + logFailedEventJSON(file.getAbsolutePath(), excp); + } + } + + return true; + } + + @Override + public void init(Properties props) { + LOG.info("MultiDestAuditProvider.init()"); + + super.init(props); + + for (AuditHandler provider : mProviders) { + try { + provider.init(props); + } catch (Throwable excp) { + LOG.info("MultiDestAuditProvider.init(): failed {}", provider.getClass().getCanonicalName(), excp); + } + } + } + + @Override + public void setName(String name) { + super.setName(name); + + for (AuditHandler provider : mProviders) { + if (provider instanceof BaseAuditHandler) { + ((BaseAuditHandler) provider).setParentPath(getName()); + } + } + } + + @Override + public void setParentPath(String parentPath) { + super.setParentPath(parentPath); + + for (AuditHandler provider : mProviders) { + if (provider instanceof BaseAuditHandler) { + ((BaseAuditHandler) provider).setParentPath(getName()); + } + } + } + + @Override + public boolean log(Collection events) { + for (AuditHandler provider : mProviders) { + try { + provider.log(events); + } catch (Throwable excp) { + logFailedEvent(events, excp); + } + } + + return true; + } + + @Override + public void start() { + for (AuditHandler provider : mProviders) { + try { + provider.start(); + } catch (Throwable excp) { + LOG.error("MultiDestAuditProvider.start(): failed for provider { {} }", provider.getClass().getName(), excp); + } + } + } + + @Override + public void stop() { + for (AuditHandler provider : mProviders) { + try { + provider.stop(); + } catch (Throwable excp) { + LOG.error("MultiDestAuditProvider.stop(): failed for provider { {} }", provider.getClass().getName(), excp); + } + } + } + + @Override 
+ public void waitToComplete() { + for (AuditHandler provider : mProviders) { + try { + provider.waitToComplete(); + } catch (Throwable excp) { + LOG.error("MultiDestAuditProvider.waitToComplete(): failed for provider { {} }", provider.getClass().getName(), excp); + } + } + } + + @Override + public void waitToComplete(long timeout) { + for (AuditHandler provider : mProviders) { + try { + provider.waitToComplete(timeout); + } catch (Throwable excp) { + LOG.error("MultiDestAuditProvider.waitToComplete(): failed for provider { {} }", provider.getClass().getName(), excp); + } + } + } + + @Override + public void flush() { + for (AuditHandler provider : mProviders) { + try { + provider.flush(); + } catch (Throwable excp) { + LOG.error("MultiDestAuditProvider.flush(): failed for provider { {} }", provider.getClass().getName(), excp); + } + } + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java index 5ed77da538..a5ba7bd721 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -22,25 +22,30 @@ import org.slf4j.LoggerFactory; public class StandAloneAuditProviderFactory extends AuditProviderFactory { - private static final Logger LOG = LoggerFactory.getLogger(StandAloneAuditProviderFactory.class); - - private volatile static StandAloneAuditProviderFactory sFactory = null; - - public static StandAloneAuditProviderFactory getInstance() { - StandAloneAuditProviderFactory ret = sFactory; - if(ret == null) { - synchronized(StandAloneAuditProviderFactory.class) { - ret = sFactory; - if(ret == null) { - ret = sFactory = new StandAloneAuditProviderFactory(); - } - } - } - return ret; - } - - private StandAloneAuditProviderFactory() { - super(); - LOG.info("StandAloneAuditProviderFactory: created.."); - } + private static final Logger LOG = LoggerFactory.getLogger(StandAloneAuditProviderFactory.class); + + private static volatile StandAloneAuditProviderFactory sFactory; + + private StandAloneAuditProviderFactory() { + super(); + + LOG.info("StandAloneAuditProviderFactory: created.."); + } + + public static StandAloneAuditProviderFactory getInstance() { + StandAloneAuditProviderFactory ret = sFactory; + + if (ret == null) { + synchronized (StandAloneAuditProviderFactory.class) { + ret = sFactory; + + if (ret == null) { + ret = new StandAloneAuditProviderFactory(); + sFactory = ret; + } + } + } + + return ret; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java index 83ff017081..869bed5af1 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java @@ -16,9 +16,6 
@@ */ package org.apache.ranger.audit.provider.hdfs; -import java.util.Map; -import java.util.Properties; - import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.provider.BufferedAuditProvider; import org.apache.ranger.audit.provider.DebugTracer; @@ -28,67 +25,68 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Map; +import java.util.Properties; + public class HdfsAuditProvider extends BufferedAuditProvider { - private static final Logger LOG = LoggerFactory.getLogger(HdfsAuditProvider.class); - - public static final String AUDIT_HDFS_IS_ASYNC_PROP = "xasecure.audit.hdfs.is.async"; - public static final String AUDIT_HDFS_MAX_QUEUE_SIZE_PROP = "xasecure.audit.hdfs.async.max.queue.size"; - public static final String AUDIT_HDFS_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.hdfs.async.max.flush.interval.ms"; - - public HdfsAuditProvider() { - } - - public void init(Properties props) { - LOG.info("HdfsAuditProvider.init()"); - - super.init(props); - - Map hdfsProps = MiscUtil.getPropertiesWithPrefix(props, "xasecure.audit.hdfs.config."); - - String encoding = hdfsProps.get("encoding"); - - String hdfsDestinationDirectory = hdfsProps.get("destination.directory"); - String hdfsDestinationFile = hdfsProps.get("destination.file"); - int hdfsDestinationFlushIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("destination.flush.interval.seconds"), 15 * 60); - int hdfsDestinationRolloverIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("destination.rollover.interval.seconds"), 24 * 60 * 60); - int hdfsDestinationOpenRetryIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("destination.open.retry.interval.seconds"), 60); - - String localFileBufferDirectory = hdfsProps.get("local.buffer.directory"); - String localFileBufferFile = hdfsProps.get("local.buffer.file"); - int localFileBufferFlushIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("local.buffer.flush.interval.seconds"), 1 * 60); - int 
localFileBufferFileBufferSizeBytes = MiscUtil.parseInteger(hdfsProps.get("local.buffer.file.buffer.size.bytes"), 8 * 1024); - int localFileBufferRolloverIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("local.buffer.rollover.interval.seconds"), 10 * 60); - String localFileBufferArchiveDirectory = hdfsProps.get("local.archive.directory"); - int localFileBufferArchiveFileCount = MiscUtil.parseInteger(hdfsProps.get("local.archive.max.file.count"), 10); - // Added for Azure. Note that exact name of these properties is not known as it contains the variable account name in it. - Map configProps = MiscUtil.getPropertiesWithPrefix(props, "xasecure.audit.destination.hdfs.config."); - - DebugTracer tracer = new Log4jTracer(LOG); - - HdfsLogDestination mHdfsDestination = new HdfsLogDestination(tracer); - - mHdfsDestination.setDirectory(hdfsDestinationDirectory); - mHdfsDestination.setFile(hdfsDestinationFile); - mHdfsDestination.setFlushIntervalSeconds(hdfsDestinationFlushIntervalSeconds); - mHdfsDestination.setEncoding(encoding); - mHdfsDestination.setRolloverIntervalSeconds(hdfsDestinationRolloverIntervalSeconds); - mHdfsDestination.setOpenRetryIntervalSeconds(hdfsDestinationOpenRetryIntervalSeconds); - mHdfsDestination.setConfigProps(configProps); - - LocalFileLogBuffer mLocalFileBuffer = new LocalFileLogBuffer(tracer); - - mLocalFileBuffer.setDirectory(localFileBufferDirectory); - mLocalFileBuffer.setFile(localFileBufferFile); - mLocalFileBuffer.setFlushIntervalSeconds(localFileBufferFlushIntervalSeconds); - mLocalFileBuffer.setFileBufferSizeBytes(localFileBufferFileBufferSizeBytes); - mLocalFileBuffer.setEncoding(encoding); - mLocalFileBuffer.setRolloverIntervalSeconds(localFileBufferRolloverIntervalSeconds); - mLocalFileBuffer.setArchiveDirectory(localFileBufferArchiveDirectory); - mLocalFileBuffer.setArchiveFileCount(localFileBufferArchiveFileCount); - - setBufferAndDestination(mLocalFileBuffer, mHdfsDestination); - } -} + private static final Logger LOG = 
LoggerFactory.getLogger(HdfsAuditProvider.class); + + public static final String AUDIT_HDFS_IS_ASYNC_PROP = "xasecure.audit.hdfs.is.async"; + public static final String AUDIT_HDFS_MAX_QUEUE_SIZE_PROP = "xasecure.audit.hdfs.async.max.queue.size"; + public static final String AUDIT_HDFS_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.hdfs.async.max.flush.interval.ms"; + + public HdfsAuditProvider() { + } + + public void init(Properties props) { + LOG.info("HdfsAuditProvider.init()"); + super.init(props); + Map hdfsProps = MiscUtil.getPropertiesWithPrefix(props, "xasecure.audit.hdfs.config."); + String encoding = hdfsProps.get("encoding"); + + String hdfsDestinationDirectory = hdfsProps.get("destination.directory"); + String hdfsDestinationFile = hdfsProps.get("destination.file"); + int hdfsDestinationFlushIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("destination.flush.interval.seconds"), 15 * 60); + int hdfsDestinationRolloverIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("destination.rollover.interval.seconds"), 24 * 60 * 60); + int hdfsDestinationOpenRetryIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("destination.open.retry.interval.seconds"), 60); + + String localFileBufferDirectory = hdfsProps.get("local.buffer.directory"); + String localFileBufferFile = hdfsProps.get("local.buffer.file"); + int localFileBufferFlushIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("local.buffer.flush.interval.seconds"), 1 * 60); + int localFileBufferFileBufferSizeBytes = MiscUtil.parseInteger(hdfsProps.get("local.buffer.file.buffer.size.bytes"), 8 * 1024); + int localFileBufferRolloverIntervalSeconds = MiscUtil.parseInteger(hdfsProps.get("local.buffer.rollover.interval.seconds"), 10 * 60); + String localFileBufferArchiveDirectory = hdfsProps.get("local.archive.directory"); + int localFileBufferArchiveFileCount = MiscUtil.parseInteger(hdfsProps.get("local.archive.max.file.count"), 10); + + // Added for Azure. 
Note that exact name of these properties is not known as it contains the variable account name in it. + Map configProps = MiscUtil.getPropertiesWithPrefix(props, "xasecure.audit.destination.hdfs.config."); + + DebugTracer tracer = new Log4jTracer(LOG); + + HdfsLogDestination mHdfsDestination = new HdfsLogDestination<>(tracer); + + mHdfsDestination.setDirectory(hdfsDestinationDirectory); + mHdfsDestination.setFile(hdfsDestinationFile); + mHdfsDestination.setFlushIntervalSeconds(hdfsDestinationFlushIntervalSeconds); + mHdfsDestination.setEncoding(encoding); + mHdfsDestination.setRolloverIntervalSeconds(hdfsDestinationRolloverIntervalSeconds); + mHdfsDestination.setOpenRetryIntervalSeconds(hdfsDestinationOpenRetryIntervalSeconds); + mHdfsDestination.setConfigProps(configProps); + + LocalFileLogBuffer mLocalFileBuffer = new LocalFileLogBuffer<>(tracer); + + mLocalFileBuffer.setDirectory(localFileBufferDirectory); + mLocalFileBuffer.setFile(localFileBufferFile); + mLocalFileBuffer.setFlushIntervalSeconds(localFileBufferFlushIntervalSeconds); + mLocalFileBuffer.setFileBufferSizeBytes(localFileBufferFileBufferSizeBytes); + mLocalFileBuffer.setEncoding(encoding); + mLocalFileBuffer.setRolloverIntervalSeconds(localFileBufferRolloverIntervalSeconds); + mLocalFileBuffer.setArchiveDirectory(localFileBufferArchiveDirectory); + mLocalFileBuffer.setArchiveFileCount(localFileBufferArchiveFileCount); + + setBufferAndDestination(mLocalFileBuffer, mHdfsDestination); + } +} diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsLogDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsLogDestination.java index be31a9ad24..48f6073314 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsLogDestination.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsLogDestination.java @@ -18,14 +18,6 @@ */ package org.apache.ranger.audit.provider.hdfs; - -import java.io.IOException; 
-import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.util.Map; - import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -36,482 +28,492 @@ import org.apache.ranger.audit.provider.LogDestination; import org.apache.ranger.audit.provider.MiscUtil; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.util.Map; + public class HdfsLogDestination implements LogDestination { - public final static String EXCP_MSG_FILESYSTEM_CLOSED = "Filesystem closed"; - - private String name = getClass().getName(); - - private String mDirectory = null; - private String mFile = null; - private int mFlushIntervalSeconds = 1 * 60; - private String mEncoding = null; - private boolean mIsAppend = false; - private int mRolloverIntervalSeconds = 24 * 60 * 60; - private int mOpenRetryIntervalSeconds = 60; - private DebugTracer mLogger = null; - - private FSDataOutputStream mFsDataOutStream = null; - private OutputStreamWriter mWriter = null; - private String mHdfsFilename = null; - private long mNextRolloverTime = 0; - private long mNextFlushTime = 0; - private long mLastOpenFailedTime = 0; - private boolean mIsStopInProgress = false; - private Map configProps = null; - - public HdfsLogDestination(DebugTracer tracer) { - mLogger = tracer; - } - - - public void setName(String name) { - this.name = name; - } - - - /* (non-Javadoc) - * @see org.apache.ranger.audit.provider.LogDestination#getName() - */ - @Override - public String getName() { - return name; - } - - public String getDirectory() { - return mDirectory; - } - - public void setDirectory(String directory) { - this.mDirectory = directory; - } - - public String getFile() { - return mFile; - } - - public void setFile(String file) { - this.mFile = 
file; - } - - public int getFlushIntervalSeconds() { - return mFlushIntervalSeconds; - } - - public void setFlushIntervalSeconds(int flushIntervalSeconds) { - mFlushIntervalSeconds = flushIntervalSeconds; - } - - public String getEncoding() { - return mEncoding; - } - - public void setEncoding(String encoding) { - mEncoding = encoding; - } - - public int getRolloverIntervalSeconds() { - return mRolloverIntervalSeconds; - } - - public void setRolloverIntervalSeconds(int rolloverIntervalSeconds) { - this.mRolloverIntervalSeconds = rolloverIntervalSeconds; - } - - public int getOpenRetryIntervalSeconds() { - return mOpenRetryIntervalSeconds; - } - - public void setOpenRetryIntervalSeconds(int minIntervalOpenRetrySeconds) { - this.mOpenRetryIntervalSeconds = minIntervalOpenRetrySeconds; - } - - @Override - public void start() { - mLogger.debug("==> HdfsLogDestination.start()"); - - openFile(); - - mLogger.debug("<== HdfsLogDestination.start()"); - } - - @Override - public void stop() { - mLogger.debug("==> HdfsLogDestination.stop()"); - - mIsStopInProgress = true; - - closeFile(); - - mIsStopInProgress = false; - - mLogger.debug("<== HdfsLogDestination.stop()"); - } - - @Override - public boolean isAvailable() { - return mWriter != null; - } - - @Override - public boolean send(AuditEventBase log) { - boolean ret = true; - - if(log != null) { - String msg = MiscUtil.stringify(log); - - ret = sendStringified(msg); - } + public static final String EXCP_MSG_FILESYSTEM_CLOSED = "Filesystem closed"; + + private String name = getClass().getName(); + + private String mDirectory; + private String mFile; + private int mFlushIntervalSeconds = 1 * 60; + private String mEncoding; + private boolean mIsAppend; + private int mRolloverIntervalSeconds = 24 * 60 * 60; + private int mOpenRetryIntervalSeconds = 60; + private final DebugTracer mLogger; + + private FSDataOutputStream mFsDataOutStream; + private OutputStreamWriter mWriter; + private String mHdfsFilename; + private long 
mNextRolloverTime; + private long mNextFlushTime; + private long mLastOpenFailedTime; + private boolean mIsStopInProgress; + private Map configProps; + + public HdfsLogDestination(DebugTracer tracer) { + mLogger = tracer; + } + + public String getDirectory() { + return mDirectory; + } + + public void setDirectory(String directory) { + this.mDirectory = directory; + } + + public String getFile() { + return mFile; + } + + public void setFile(String file) { + this.mFile = file; + } + + public int getFlushIntervalSeconds() { + return mFlushIntervalSeconds; + } + + public void setFlushIntervalSeconds(int flushIntervalSeconds) { + mFlushIntervalSeconds = flushIntervalSeconds; + } + + public String getEncoding() { + return mEncoding; + } + + public void setEncoding(String encoding) { + mEncoding = encoding; + } + + public int getRolloverIntervalSeconds() { + return mRolloverIntervalSeconds; + } + + public void setRolloverIntervalSeconds(int rolloverIntervalSeconds) { + this.mRolloverIntervalSeconds = rolloverIntervalSeconds; + } + + public int getOpenRetryIntervalSeconds() { + return mOpenRetryIntervalSeconds; + } + + public void setOpenRetryIntervalSeconds(int minIntervalOpenRetrySeconds) { + this.mOpenRetryIntervalSeconds = minIntervalOpenRetrySeconds; + } + + @Override + public void start() { + mLogger.debug("==> HdfsLogDestination.start()"); + + openFile(); + + mLogger.debug("<== HdfsLogDestination.start()"); + } + + @Override + public void stop() { + mLogger.debug("==> HdfsLogDestination.stop()"); + + mIsStopInProgress = true; + + closeFile(); + + mIsStopInProgress = false; + + mLogger.debug("<== HdfsLogDestination.stop()"); + } + + @Override + public boolean isAvailable() { + return mWriter != null; + } + + @Override + public boolean send(AuditEventBase log) { + boolean ret = true; + + if (log != null) { + String msg = MiscUtil.stringify(log); + + ret = sendStringified(msg); + } + + return ret; + } + + @Override + public boolean send(AuditEventBase[] logs) { + for 
(AuditEventBase log : logs) { + boolean ret = send(log); + + if (!ret) { + return ret; + } + } + + return true; + } + + @Override + public boolean sendStringified(String log) { + boolean ret = false; + + checkFileStatus(); + + OutputStreamWriter writer = mWriter; + + if (writer != null) { + try { + writer.write(log + MiscUtil.LINE_SEPARATOR); + + ret = true; + } catch (IOException excp) { + mLogger.warn("HdfsLogDestination.sendStringified(): write failed", excp); + + closeFile(); + } + } + + return ret; + } + + @Override + public boolean sendStringified(String[] logs) { + for (String log : logs) { + boolean ret = sendStringified(log); + + if (!ret) { + return ret; + } + } + + return true; + } + + @Override + public boolean flush() { + mLogger.debug("==> HdfsLogDestination.flush()"); + + boolean ret = false; + + OutputStreamWriter writer = mWriter; + + if (writer != null) { + try { + writer.flush(); + + ret = true; + } catch (IOException excp) { + logException("HdfsLogDestination: flush() failed", excp); + } + } + + FSDataOutputStream ostream = mFsDataOutStream; + + if (ostream != null) { + try { + ostream.hflush(); + + ret = true; + } catch (IOException excp) { + logException("HdfsLogDestination: hflush() failed", excp); + } + } + + if (ret) { + mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L); + } + + mLogger.debug("<== HdfsLogDestination.flush()"); + + return ret; + } + + /* (non-Javadoc) + * @see org.apache.ranger.audit.provider.LogDestination#getName() + */ + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("HdfsLogDestination {"); + sb.append("Directory=").append(mDirectory).append("; "); + sb.append("File=").append(mFile).append("; "); + sb.append("RolloverIntervalSeconds=").append(mRolloverIntervalSeconds); + sb.append("}"); + + return sb.toString(); + } + + 
public void setConfigProps(Map configProps) { + this.configProps = configProps; + } + + Configuration createConfiguration() { + Configuration conf = new Configuration(); + + if (configProps != null) { + for (Map.Entry entry : configProps.entrySet()) { + String key = entry.getKey(); + String value = entry.getValue(); + + // for ease of install config file may contain properties with empty value, skip those + if (StringUtils.isNotEmpty(value)) { + conf.set(key, value); + } + + mLogger.info("Adding property to HDFS config: " + key + " => " + value); + } + } + + mLogger.info("Returning HDFS Filesystem Config: " + conf); + + return conf; + } + + private void openFile() { + mLogger.debug("==> HdfsLogDestination.openFile()"); + + closeFile(); + + mNextRolloverTime = MiscUtil.getNextRolloverTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); + + long startTime = MiscUtil.getRolloverStartTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); + + mHdfsFilename = MiscUtil.replaceTokens(mDirectory + Path.SEPARATOR + mFile, startTime); + + FileSystem fileSystem; + Path pathLogfile; + Configuration conf; + FSDataOutputStream ostream = null; + boolean bOverwrite = false; - return ret; - } - - - @Override - public boolean send(AuditEventBase[] logs) { - for (AuditEventBase log : logs) { - boolean ret = send(log); - if(!ret) { - return ret; - } - } - return true; - } + try { + mLogger.debug("HdfsLogDestination.openFile(): opening file " + mHdfsFilename); - @Override - public boolean sendStringified(String log) { - boolean ret = false; + URI uri = URI.create(mHdfsFilename); - checkFileStatus(); + // TODO: mechanism to XA-HDFS plugin to disable auditing of access checks to the current HDFS file - OutputStreamWriter writer = mWriter; + conf = createConfiguration(); + pathLogfile = new Path(mHdfsFilename); + fileSystem = FileSystem.get(uri, conf); - if(writer != null) { - try { - writer.write(log + MiscUtil.LINE_SEPARATOR); + try { + if (fileSystem.exists(pathLogfile)) { 
// file already exists. either append to the file or write to a new file + if (mIsAppend) { + mLogger.info("HdfsLogDestination.openFile(): opening file for append " + mHdfsFilename); - ret = true; - } catch (IOException excp) { - mLogger.warn("HdfsLogDestination.sendStringified(): write failed", excp); + ostream = fileSystem.append(pathLogfile); + } else { + mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem); + pathLogfile = new Path(mHdfsFilename); + } + } - closeFile(); - } - } + // if file does not exist or if mIsAppend==false, create the file + if (ostream == null) { + mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename); - return ret; - } + createParents(pathLogfile, fileSystem); + ostream = fileSystem.create(pathLogfile, bOverwrite); + } + } catch (IOException excp) { + // append may not be supported by the filesystem; or the file might already be open by another application. Try a different filename + String failedFilename = mHdfsFilename; - @Override - public boolean sendStringified(String[] logs) { - for (String log : logs) { - boolean ret = sendStringified(log); - if(!ret) { - return ret; - } - } - return true; - } - - - @Override - public boolean flush() { - mLogger.debug("==> HdfsLogDestination.flush()"); + mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem); + pathLogfile = new Path(mHdfsFilename); - boolean ret = false; - - OutputStreamWriter writer = mWriter; - - if(writer != null) { - try { - writer.flush(); - - ret = true; - } catch (IOException excp) { - logException("HdfsLogDestination: flush() failed", excp); - } - } - - FSDataOutputStream ostream = mFsDataOutStream; - - if(ostream != null) { - try { - ostream.hflush(); - - ret = true; - } catch (IOException excp) { - logException("HdfsLogDestination: hflush() failed", excp); - } - } - - if(ret) { - mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L); - } - - mLogger.debug("<== HdfsLogDestination.flush()"); - - return ret; 
- } - - private void openFile() { - mLogger.debug("==> HdfsLogDestination.openFile()"); - - closeFile(); - - mNextRolloverTime = MiscUtil.getNextRolloverTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); - - long startTime = MiscUtil.getRolloverStartTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L)); - - mHdfsFilename = MiscUtil.replaceTokens(mDirectory + Path.SEPARATOR + mFile, startTime); - - FSDataOutputStream ostream = null; - FileSystem fileSystem = null; - Path pathLogfile = null; - Configuration conf = null; - boolean bOverwrite = false; - - try { - mLogger.debug("HdfsLogDestination.openFile(): opening file " + mHdfsFilename); + mLogger.info("HdfsLogDestination.openFile(): failed in opening file " + failedFilename + ". Will try opening " + mHdfsFilename); + } - URI uri = URI.create(mHdfsFilename); + if (ostream == null) { + mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename); - // TODO: mechanism to XA-HDFS plugin to disable auditing of access checks to the current HDFS file + createParents(pathLogfile, fileSystem); + ostream = fileSystem.create(pathLogfile, bOverwrite); + } + } catch (Throwable ex) { + mLogger.warn("HdfsLogDestination.openFile() failed", ex); + // } finally { + // TODO: unset the property set above to exclude auditing of logfile opening + // System.setProperty(hdfsCurrentFilenameProperty, null); + } - conf = createConfiguration(); - pathLogfile = new Path(mHdfsFilename); - fileSystem = FileSystem.get(uri, conf); + mWriter = createWriter(ostream); - try { - if(fileSystem.exists(pathLogfile)) { // file already exists. 
either append to the file or write to a new file - if(mIsAppend) { - mLogger.info("HdfsLogDestination.openFile(): opening file for append " + mHdfsFilename); - - ostream = fileSystem.append(pathLogfile); - } else { - mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem); - pathLogfile = new Path(mHdfsFilename); - } - } - - // if file does not exist or if mIsAppend==false, create the file - if(ostream == null) { - mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename); - - createParents(pathLogfile, fileSystem); - ostream = fileSystem.create(pathLogfile, bOverwrite); - } - } catch(IOException excp) { - // append may not be supported by the filesystem; or the file might already be open by another application. Try a different filename - String failedFilename = mHdfsFilename; - - mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem); - pathLogfile = new Path(mHdfsFilename); - - mLogger.info("HdfsLogDestination.openFile(): failed in opening file " + failedFilename + ". 
Will try opening " + mHdfsFilename); - } - - if(ostream == null){ - mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename); - - createParents(pathLogfile, fileSystem); - ostream = fileSystem.create(pathLogfile, bOverwrite); - } - } catch(Throwable ex) { - mLogger.warn("HdfsLogDestination.openFile() failed", ex); -// } finally { - // TODO: unset the property set above to exclude auditing of logfile opening - // System.setProperty(hdfsCurrentFilenameProperty, null); - } - - mWriter = createWriter(ostream); - - if(mWriter != null) { - mLogger.debug("HdfsLogDestination.openFile(): opened file " + mHdfsFilename); - - mFsDataOutStream = ostream; - mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L); - mLastOpenFailedTime = 0; - } else { - mLogger.warn("HdfsLogDestination.openFile(): failed to open file for write " + mHdfsFilename); - - mHdfsFilename = null; - mLastOpenFailedTime = System.currentTimeMillis(); - } - - mLogger.debug("<== HdfsLogDestination.openFile(" + mHdfsFilename + ")"); - } - - private void closeFile() { - mLogger.debug("==> HdfsLogDestination.closeFile()"); - - flush(); - - OutputStreamWriter writer = mWriter; - - mWriter = null; - mFsDataOutStream = null; - - if(writer != null) { - try { - mLogger.info("HdfsLogDestination.closeFile(): closing file " + mHdfsFilename); - - writer.close(); - } catch(IOException excp) { - logException("HdfsLogDestination: failed to close file " + mHdfsFilename, excp); - } - } - - mLogger.debug("<== HdfsLogDestination.closeFile()"); - } - - private void rollover() { - mLogger.debug("==> HdfsLogDestination.rollover()"); - - closeFile(); - - openFile(); - - mLogger.debug("<== HdfsLogDestination.rollover()"); - } - - private void checkFileStatus() { - long now = System.currentTimeMillis(); - - if(mWriter == null) { - if(now > (mLastOpenFailedTime + (mOpenRetryIntervalSeconds * 1000L))) { - openFile(); - } - } else if(now > mNextRolloverTime) { - rollover(); - } else 
if(now > mNextFlushTime) { - flush(); - } - } - - private OutputStreamWriter createWriter(OutputStream os ) { - OutputStreamWriter writer = null; - - if(os != null) { - if(mEncoding != null) { - try { - writer = new OutputStreamWriter(os, mEncoding); - } catch(UnsupportedEncodingException excp) { - mLogger.warn("HdfsLogDestination.createWriter(): failed to create output writer.", excp); - } - } - - if(writer == null) { - writer = new OutputStreamWriter(os); - } - } - - return writer; - } - - private void createParents(Path pathLogfile, FileSystem fileSystem) { - try { - Path parentPath = pathLogfile != null ? pathLogfile.getParent() : null; - - if(parentPath != null && fileSystem != null && !fileSystem.exists(parentPath)) { - fileSystem.mkdirs(parentPath); - } - } catch (IOException e) { - logException("HdfsLogDestination.createParents() failed", e); - } catch (Throwable e) { - mLogger.warn("HdfsLogDestination.createParents() failed", e); - } - } + if (mWriter != null) { + mLogger.debug("HdfsLogDestination.openFile(): opened file " + mHdfsFilename); + + mFsDataOutStream = ostream; + mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L); + mLastOpenFailedTime = 0; + } else { + mLogger.warn("HdfsLogDestination.openFile(): failed to open file for write " + mHdfsFilename); + + mHdfsFilename = null; + mLastOpenFailedTime = System.currentTimeMillis(); + } + + mLogger.debug("<== HdfsLogDestination.openFile(" + mHdfsFilename + ")"); + } + + private void closeFile() { + mLogger.debug("==> HdfsLogDestination.closeFile()"); + + flush(); + + OutputStreamWriter writer = mWriter; + + mWriter = null; + mFsDataOutStream = null; + + if (writer != null) { + try { + mLogger.info("HdfsLogDestination.closeFile(): closing file " + mHdfsFilename); + + writer.close(); + } catch (IOException excp) { + logException("HdfsLogDestination: failed to close file " + mHdfsFilename, excp); + } + } + + mLogger.debug("<== HdfsLogDestination.closeFile()"); + } + + private void 
rollover() { + mLogger.debug("==> HdfsLogDestination.rollover()"); + + closeFile(); + + openFile(); + + mLogger.debug("<== HdfsLogDestination.rollover()"); + } + + private void checkFileStatus() { + long now = System.currentTimeMillis(); + + if (mWriter == null) { + if (now > (mLastOpenFailedTime + (mOpenRetryIntervalSeconds * 1000L))) { + openFile(); + } + } else if (now > mNextRolloverTime) { + rollover(); + } else if (now > mNextFlushTime) { + flush(); + } + } + + private OutputStreamWriter createWriter(OutputStream os) { + OutputStreamWriter writer = null; + + if (os != null) { + if (mEncoding != null) { + try { + writer = new OutputStreamWriter(os, mEncoding); + } catch (UnsupportedEncodingException excp) { + mLogger.warn("HdfsLogDestination.createWriter(): failed to create output writer.", excp); + } + } + + if (writer == null) { + writer = new OutputStreamWriter(os); + } + } + + return writer; + } + + private void createParents(Path pathLogfile, FileSystem fileSystem) { + try { + Path parentPath = pathLogfile != null ? 
pathLogfile.getParent() : null; + + if (parentPath != null && fileSystem != null && !fileSystem.exists(parentPath)) { + fileSystem.mkdirs(parentPath); + } + } catch (IOException e) { + logException("HdfsLogDestination.createParents() failed", e); + } catch (Throwable e) { + mLogger.warn("HdfsLogDestination.createParents() failed", e); + } + } private String getNewFilename(String fileName, FileSystem fileSystem) { - if(fileName == null) { - return ""; - } - - for(int i = 1;; i++) { - String ret = fileName; - - String strToAppend = "-" + Integer.toString(i); - - int extnPos = ret.lastIndexOf("."); - - if(extnPos < 0) { - ret += strToAppend; - } else { - String extn = ret.substring(extnPos); - - ret = ret.substring(0, extnPos) + strToAppend + extn; - } - - if(fileSystem != null && fileExists(ret, fileSystem)) { - continue; - } else { - return ret; - } - } + if (fileName == null) { + return ""; + } + + for (int i = 1; ; i++) { + String ret = fileName; + + String strToAppend = "-" + i; + + int extnPos = ret.lastIndexOf("."); + + if (extnPos < 0) { + ret += strToAppend; + } else { + String extn = ret.substring(extnPos); + + ret = ret.substring(0, extnPos) + strToAppend + extn; + } + + if (fileSystem != null && fileExists(ret, fileSystem)) { + continue; + } else { + return ret; + } + } } private boolean fileExists(String fileName, FileSystem fileSystem) { - boolean ret = false; + boolean ret = false; - if(fileName != null && fileSystem != null) { - Path path = new Path(fileName); + if (fileName != null && fileSystem != null) { + Path path = new Path(fileName); - try { - ret = fileSystem.exists(path); - } catch(IOException excp) { - // ignore - } - } + try { + ret = fileSystem.exists(path); + } catch (IOException excp) { + // ignore + } + } - return ret; + return ret; } private void logException(String msg, IOException excp) { - // during shutdown, the underlying FileSystem might already be closed; so don't print error details - - if(mIsStopInProgress) { - return; - } - - 
String excpMsgToExclude = EXCP_MSG_FILESYSTEM_CLOSED; - String excpMsg = excp != null ? excp.getMessage() : null; - boolean excpExcludeLogging = (excpMsg != null && excpMsg.contains(excpMsgToExclude)); - - if(! excpExcludeLogging) { - mLogger.warn(msg, excp); - } - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - - sb.append("HdfsLogDestination {"); - sb.append("Directory=").append(mDirectory).append("; "); - sb.append("File=").append(mFile).append("; "); - sb.append("RolloverIntervalSeconds=").append(mRolloverIntervalSeconds); - sb.append("}"); - - return sb.toString(); - } - - public void setConfigProps(Map configProps) { - this.configProps = configProps; - } - - Configuration createConfiguration() { - Configuration conf = new Configuration(); - if (configProps != null) { - for (Map.Entry entry : configProps.entrySet()) { - String key = entry.getKey(); - String value = entry.getValue(); - // for ease of install config file may contain properties with empty value, skip those - if (StringUtils.isNotEmpty(value)) { - conf.set(key, value); - } - mLogger.info("Adding property to HDFS config: " + key + " => " + value); - } - } - - mLogger.info("Returning HDFS Filesystem Config: " + conf.toString()); - return conf; - } + // during shutdown, the underlying FileSystem might already be closed; so don't print error details + + if (mIsStopInProgress) { + return; + } + + String excpMsg = excp != null ? 
excp.getMessage() : null; + boolean excpExcludeLogging = (excpMsg != null && excpMsg.contains(EXCP_MSG_FILESYSTEM_CLOSED)); + + if (!excpExcludeLogging) { + mLogger.warn(msg, excp); + } + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java index 3a452c22a6..6e30aacecc 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java @@ -16,11 +16,6 @@ */ package org.apache.ranger.audit.provider.kafka; -import java.security.PrivilegedExceptionAction; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; @@ -31,162 +26,166 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.security.PrivilegedExceptionAction; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; public class KafkaAuditProvider extends AuditDestination { - private static final Logger LOG = LoggerFactory.getLogger(KafkaAuditProvider.class); - - public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.kafka.async.max.queue.size"; - public static final String AUDIT_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.kafka.async.max.flush.interval.ms"; - public static final String AUDIT_KAFKA_BROKER_LIST = "xasecure.audit.kafka.broker_list"; - public static final String AUDIT_KAFKA_TOPIC_NAME = "xasecure.audit.kafka.topic_name"; - boolean initDone = false; - - Producer producer = null; - String topic = null; - - @Override - public void init(Properties props) { - LOG.info("init() called"); - super.init(props); - - topic = 
MiscUtil.getStringProperty(props, - AUDIT_KAFKA_TOPIC_NAME); - if (topic == null || topic.isEmpty()) { - topic = "ranger_audits"; - } - - try { - if (!initDone) { - String brokerList = MiscUtil.getStringProperty(props, - AUDIT_KAFKA_BROKER_LIST); - if (brokerList == null || brokerList.isEmpty()) { - brokerList = "localhost:9092"; - } - - final Map kakfaProps = new HashMap(); - kakfaProps.put("metadata.broker.list", brokerList); - kakfaProps.put("serializer.class", - "kafka.serializer.StringEncoder"); - // kakfaProps.put("partitioner.class", - // "example.producer.SimplePartitioner"); - kakfaProps.put("request.required.acks", "1"); - - LOG.info("Connecting to Kafka producer using properties:" - + kakfaProps.toString()); - - producer = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction>) () -> new KafkaProducer<>(kakfaProps)); - - initDone = true; - } - } catch (Throwable t) { - LOG.error("Error initializing kafka:", t); - } - } - - @Override - public boolean log(AuditEventBase event) { - if (event instanceof AuthzAuditEvent) { - AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; - - if (authzEvent.getAgentHostname() == null) { - authzEvent.setAgentHostname(MiscUtil.getHostname()); - } - - if (authzEvent.getLogType() == null) { - authzEvent.setLogType("RangerAudit"); - } - - if (authzEvent.getEventId() == null) { - authzEvent.setEventId(MiscUtil.generateUniqueId()); - } - } - - String message = MiscUtil.stringify(event); - try { - - if (producer != null) { - // TODO: Add partition key - final ProducerRecord keyedMessage = new ProducerRecord( - topic, message); - - MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { - producer.send(keyedMessage); - return null; - }); - - } else { - LOG.info("AUDIT LOG (Kafka Down):" + message); - } - } catch (Throwable t) { - LOG.error("Error sending message to Kafka topic. 
topic=" + topic - + ", message=" + message, t); - return false; - } - return true; - } - - @Override - public boolean log(Collection events) { - for (AuditEventBase event : events) { - log(event); - } - return true; - } - - @Override - public boolean logJSON(String event) { - AuditEventBase eventObj = MiscUtil.fromJson(event, - AuthzAuditEvent.class); - return log(eventObj); - } - - @Override - public boolean logJSON(Collection events) { - for (String event : events) { - logJSON(event); - } - return false; - } - - @Override - public void start() { - LOG.info("start() called"); - // TODO Auto-generated method stub - - } - - @Override - public void stop() { - LOG.info("stop() called"); - if (producer != null) { - try { - MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { - producer.close(); - return null; - }); - } catch (Throwable t) { - LOG.error("Error closing Kafka producer"); - } - } - } - - @Override - public void waitToComplete() { - LOG.info("waitToComplete() called"); - } - - @Override - public void waitToComplete(long timeout) { - } - - @Override - public void flush() { - LOG.info("flush() called"); - - } - - public boolean isAsync() { - return true; - } + private static final Logger LOG = LoggerFactory.getLogger(KafkaAuditProvider.class); + + public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.kafka.async.max.queue.size"; + public static final String AUDIT_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.kafka.async.max.flush.interval.ms"; + public static final String AUDIT_KAFKA_BROKER_LIST = "xasecure.audit.kafka.broker_list"; + public static final String AUDIT_KAFKA_TOPIC_NAME = "xasecure.audit.kafka.topic_name"; + + boolean initDone; + Producer producer; + String topic; + + @Override + public void init(Properties props) { + LOG.info("init() called"); + + super.init(props); + + topic = MiscUtil.getStringProperty(props, AUDIT_KAFKA_TOPIC_NAME); + + if (topic == null || topic.isEmpty()) { + topic = "ranger_audits"; + } + + 
try { + if (!initDone) { + String brokerList = MiscUtil.getStringProperty(props, AUDIT_KAFKA_BROKER_LIST); + + if (brokerList == null || brokerList.isEmpty()) { + brokerList = "localhost:9092"; + } + final Map kakfaProps = new HashMap<>(); + + kakfaProps.put("metadata.broker.list", brokerList); + kakfaProps.put("serializer.class", "kafka.serializer.StringEncoder"); + // kakfaProps.put("partitioner.class", "example.producer.SimplePartitioner"); + kakfaProps.put("request.required.acks", "1"); + + LOG.info("Connecting to Kafka producer using properties:{}", kakfaProps); + + producer = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction>) () -> new KafkaProducer<>(kakfaProps)); + initDone = true; + } + } catch (Throwable t) { + LOG.error("Error initializing kafka:", t); + } + } + + @Override + public boolean log(AuditEventBase event) { + if (event instanceof AuthzAuditEvent) { + AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; + + if (authzEvent.getAgentHostname() == null) { + authzEvent.setAgentHostname(MiscUtil.getHostname()); + } + + if (authzEvent.getLogType() == null) { + authzEvent.setLogType("RangerAudit"); + } + + if (authzEvent.getEventId() == null) { + authzEvent.setEventId(MiscUtil.generateUniqueId()); + } + } + + String message = MiscUtil.stringify(event); + + try { + if (producer != null) { + // TODO: Add partition key + final ProducerRecord keyedMessage = new ProducerRecord<>(topic, message); + + MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { + producer.send(keyedMessage); + + return null; + }); + } else { + LOG.info("AUDIT LOG (Kafka Down):{}", message); + } + } catch (Throwable t) { + LOG.error("Error sending message to Kafka topic. 
topic={}, message={}", topic, message, t); + + return false; + } + + return true; + } + + @Override + public boolean logJSON(String event) { + AuditEventBase eventObj = MiscUtil.fromJson(event, AuthzAuditEvent.class); + + return log(eventObj); + } + + @Override + public boolean logJSON(Collection events) { + for (String event : events) { + logJSON(event); + } + + return false; + } + + @Override + public boolean log(Collection events) { + for (AuditEventBase event : events) { + log(event); + } + + return true; + } + + @Override + public void flush() { + LOG.info("flush() called"); + } + + @Override + public void start() { + LOG.info("start() called"); + // TODO Auto-generated method stub + } + + @Override + public void stop() { + LOG.info("stop() called"); + + if (producer != null) { + try { + MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { + producer.close(); + + return null; + }); + } catch (Throwable t) { + LOG.error("Error closing Kafka producer"); + } + } + } + + @Override + public void waitToComplete() { + LOG.info("waitToComplete() called"); + } + + @Override + public void waitToComplete(long timeout) { + } + + public boolean isAsync() { + return true; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java index 691cef0021..ef0e210b85 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java @@ -19,13 +19,6 @@ package org.apache.ranger.audit.provider.solr; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.util.Collection; -import java.util.Collections; -import java.util.Date; -import java.util.Properties; - import org.apache.ranger.audit.destination.AuditDestination; import org.apache.ranger.audit.model.AuditEventBase; 
import org.apache.ranger.audit.model.AuthzAuditEvent; @@ -38,268 +31,274 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.Properties; + public class SolrAuditProvider extends AuditDestination { - private static final Logger LOG = LoggerFactory.getLogger(SolrAuditProvider.class); - - public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.solr.async.max.queue.size"; - public static final String AUDIT_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.solr.async.max.flush.interval.ms"; - public static final String AUDIT_RETRY_WAIT_PROP = "xasecure.audit.solr.retry.ms"; - - static final Object lock = new Object(); - volatile SolrClient solrClient = null; - Date lastConnectTime = null; - long lastFailTime = 0; - - int retryWaitTime = 30000; - - public SolrAuditProvider() { - } - - @Override - public void init(Properties props) { - LOG.info("init() called"); - super.init(props); - - retryWaitTime = MiscUtil.getIntProperty(props, - AUDIT_RETRY_WAIT_PROP, retryWaitTime); - } - - void connect() { - SolrClient me = solrClient; - if (me == null) { - synchronized (lock) { - me = solrClient; - if (me == null) { - final String solrURL = MiscUtil.getStringProperty(props, - "xasecure.audit.solr.solr_url"); - - if (lastConnectTime != null) { - // Let's wait for enough time before retrying - long diff = System.currentTimeMillis() - - lastConnectTime.getTime(); - if (diff < retryWaitTime) { - if (LOG.isDebugEnabled()) { - LOG.debug("Ignore connecting to solr url=" - + solrURL + ", lastConnect=" + diff - + "ms"); - } - return; - } - } - lastConnectTime = new Date(); - - if (solrURL == null || solrURL.isEmpty()) { - LOG.error("Solr URL for Audit is empty"); - return; - } - - try { - // TODO: Need to support SolrCloud also - solrClient = MiscUtil.executePrivilegedAction(new 
PrivilegedExceptionAction() { - @Override - public SolrClient run() throws Exception { - HttpSolrClient.Builder builder = new HttpSolrClient.Builder(); - builder.withBaseSolrUrl(solrURL); - builder.allowCompression(true); - builder.withConnectionTimeout(1000); - HttpSolrClient httpSolrClient = builder.build(); - return httpSolrClient; - }; - }); - - me = solrClient; - } catch (Throwable t) { - LOG.error("Can't connect to Solr server. URL=" - + solrURL, t); - } - } - } - } - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. - * audit.model.AuditEventBase) - */ - @Override - public boolean log(AuditEventBase event) { - if (!(event instanceof AuthzAuditEvent)) { - LOG.error(event.getClass().getName() - + " audit event class type is not supported"); - return false; - } - AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; - // TODO: This should be done at a higher level - - if (authzEvent.getAgentHostname() == null) { - authzEvent.setAgentHostname(MiscUtil.getHostname()); - } - - if (authzEvent.getLogType() == null) { - authzEvent.setLogType("RangerAudit"); - } - - if (authzEvent.getEventId() == null) { - authzEvent.setEventId(MiscUtil.generateUniqueId()); - } - - try { - if (solrClient == null) { - connect(); - if (solrClient == null) { - // Solr is still not initialized. So need to throw error - return false; - } - } - - if (lastFailTime > 0) { - long diff = System.currentTimeMillis() - lastFailTime; - if (diff < retryWaitTime) { - if (LOG.isDebugEnabled()) { - LOG.debug("Ignore sending audit. 
lastConnect=" + diff - + " ms"); - } - return false; - } - } - // Convert AuditEventBase to Solr document - final SolrInputDocument document = toSolrDoc(authzEvent); - final Collection docs = Collections.singletonList(document); - final UpdateResponse response = SolrAppUtil.addDocsToSolr(solrClient, docs); - - if (response.getStatus() != 0) { - lastFailTime = System.currentTimeMillis(); - - // System.out.println("Response=" + response.toString() - // + ", status= " + response.getStatus() + ", event=" - // + event); - // throw new Exception("Aborting. event=" + event + - // ", response=" - // + response.toString()); - } else { - lastFailTime = 0; - } - - } catch (Throwable t) { - LOG.error("Error sending message to Solr", t); - return false; - } - return true; - } - - @Override - public boolean log(Collection events) { - for (AuditEventBase event : events) { - log(event); - } - return true; - } - - @Override - public boolean logJSON(String event) { - AuditEventBase eventObj = MiscUtil.fromJson(event, - AuthzAuditEvent.class); - return log(eventObj); - } - - @Override - public boolean logJSON(Collection events) { - for (String event : events) { - logJSON(event); - } - return false; - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#start() - */ - @Override - public void start() { - connect(); - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#stop() - */ - @Override - public void stop() { - LOG.info("SolrAuditProvider.stop() called.."); - try { - if (solrClient != null) { - solrClient.close(); - } - } catch (IOException ioe) { - LOG.error("Error while stopping slor!", ioe); - } finally { - solrClient = null; - } - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete() - */ - @Override - public void waitToComplete() { - - } - - - @Override - public void waitToComplete(long timeout) { - - } - - /* - * (non-Javadoc) - * - * @see 
org.apache.ranger.audit.provider.AuditProvider#flush() - */ - @Override - public void flush() { - // TODO Auto-generated method stub - - } - - SolrInputDocument toSolrDoc(AuthzAuditEvent auditEvent) { - SolrInputDocument doc = new SolrInputDocument(); - doc.addField("id", auditEvent.getEventId()); - doc.addField("access", auditEvent.getAccessType()); - doc.addField("enforcer", auditEvent.getAclEnforcer()); - doc.addField("agent", auditEvent.getAgentId()); - doc.addField("repo", auditEvent.getRepositoryName()); - doc.addField("sess", auditEvent.getSessionId()); - doc.addField("reqUser", auditEvent.getUser()); - doc.addField("reqData", auditEvent.getRequestData()); - doc.addField("resource", auditEvent.getResourcePath()); - doc.addField("cliIP", auditEvent.getClientIP()); - doc.addField("logType", auditEvent.getLogType()); - doc.addField("result", auditEvent.getAccessResult()); - doc.addField("policy", auditEvent.getPolicyId()); - doc.addField("repoType", auditEvent.getRepositoryType()); - doc.addField("resType", auditEvent.getResourceType()); - doc.addField("reason", auditEvent.getResultReason()); - doc.addField("action", auditEvent.getAction()); - doc.addField("evtTime", auditEvent.getEventTime()); - doc.addField("tags", auditEvent.getTags()); - doc.addField("datasets", auditEvent.getDatasets()); - doc.addField("projects", auditEvent.getProjects()); - doc.addField("cluster", auditEvent.getClusterName()); - doc.addField("zone", auditEvent.getZoneName()); - doc.addField("agentHost", auditEvent.getAgentHostname()); - return doc; - } - - public boolean isAsync() { - return true; - } + private static final Logger LOG = LoggerFactory.getLogger(SolrAuditProvider.class); + + public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.solr.async.max.queue.size"; + public static final String AUDIT_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.solr.async.max.flush.interval.ms"; + public static final String AUDIT_RETRY_WAIT_PROP = "xasecure.audit.solr.retry.ms"; + + 
static final Object lock = new Object(); + + volatile SolrClient solrClient; + + Date lastConnectTime; + long lastFailTime; + int retryWaitTime = 30000; + + public SolrAuditProvider() { + } + + @Override + public void init(Properties props) { + LOG.info("init() called"); + + super.init(props); + + retryWaitTime = MiscUtil.getIntProperty(props, AUDIT_RETRY_WAIT_PROP, retryWaitTime); + } + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.audit.model.AuditEventBase) + */ + @Override + public boolean log(AuditEventBase event) { + if (!(event instanceof AuthzAuditEvent)) { + LOG.error("{} audit event class type is not supported", event.getClass().getName()); + + return false; + } + + AuthzAuditEvent authzEvent = (AuthzAuditEvent) event; + // TODO: This should be done at a higher level + + if (authzEvent.getAgentHostname() == null) { + authzEvent.setAgentHostname(MiscUtil.getHostname()); + } + + if (authzEvent.getLogType() == null) { + authzEvent.setLogType("RangerAudit"); + } + + if (authzEvent.getEventId() == null) { + authzEvent.setEventId(MiscUtil.generateUniqueId()); + } + + try { + SolrClient solrClient = this.solrClient; + + if (solrClient == null) { + connect(); + + solrClient = this.solrClient; + + if (solrClient == null) { + // Solr is still not initialized. So need to throw error + return false; + } + } + + if (lastFailTime > 0) { + long diff = System.currentTimeMillis() - lastFailTime; + + if (diff < retryWaitTime) { + LOG.debug("Ignore sending audit. 
lastConnect={} ms", diff); + + return false; + } + } + + // Convert AuditEventBase to Solr document + final SolrInputDocument document = toSolrDoc(authzEvent); + final Collection docs = Collections.singletonList(document); + final UpdateResponse response = SolrAppUtil.addDocsToSolr(solrClient, docs); + + if (response.getStatus() != 0) { + lastFailTime = System.currentTimeMillis(); + } else { + lastFailTime = 0; + } + } catch (Throwable t) { + LOG.error("Error sending message to Solr", t); + + return false; + } + + return true; + } + + @Override + public boolean logJSON(String event) { + AuditEventBase eventObj = MiscUtil.fromJson(event, AuthzAuditEvent.class); + + return log(eventObj); + } + + @Override + public boolean logJSON(Collection events) { + for (String event : events) { + logJSON(event); + } + + return false; + } + + @Override + public boolean log(Collection events) { + for (AuditEventBase event : events) { + log(event); + } + + return true; + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#flush() + */ + @Override + public void flush() { + // TODO Auto-generated method stub + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#start() + */ + @Override + public void start() { + connect(); + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#stop() + */ + @Override + public void stop() { + LOG.info("SolrAuditProvider.stop() called.."); + + try { + SolrClient solrClient = this.solrClient; + + if (solrClient != null) { + solrClient.close(); + } + } catch (IOException ioe) { + LOG.error("Error while stopping slor!", ioe); + } finally { + solrClient = null; + } + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete() + */ + @Override + public void waitToComplete() { + } + + @Override + public void waitToComplete(long timeout) { + } + + public boolean isAsync() { + return true; + } + + void connect() { + 
SolrClient me = solrClient; + + if (me == null) { + synchronized (lock) { + me = solrClient; + + if (me == null) { + final String solrURL = MiscUtil.getStringProperty(props, "xasecure.audit.solr.solr_url"); + + if (lastConnectTime != null) { + // Let's wait for enough time before retrying + long diff = System.currentTimeMillis() - lastConnectTime.getTime(); + + if (diff < retryWaitTime) { + LOG.debug("Ignore connecting to solr url={}, lastConnect={}ms", solrURL, diff); + + return; + } + } + + lastConnectTime = new Date(); + + if (solrURL == null || solrURL.isEmpty()) { + LOG.error("Solr URL for Audit is empty"); + + return; + } + + try { + // TODO: Need to support SolrCloud also + me = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { + HttpSolrClient.Builder builder = new HttpSolrClient.Builder(); + + builder.withBaseSolrUrl(solrURL); + builder.allowCompression(true); + builder.withConnectionTimeout(1000); + + return builder.build(); + }); + + solrClient = me; + } catch (Throwable t) { + LOG.error("Can't connect to Solr server. 
URL={}", solrURL, t); + } + } + } + } + } + + SolrInputDocument toSolrDoc(AuthzAuditEvent auditEvent) { + SolrInputDocument doc = new SolrInputDocument(); + + doc.addField("id", auditEvent.getEventId()); + doc.addField("access", auditEvent.getAccessType()); + doc.addField("enforcer", auditEvent.getAclEnforcer()); + doc.addField("agent", auditEvent.getAgentId()); + doc.addField("repo", auditEvent.getRepositoryName()); + doc.addField("sess", auditEvent.getSessionId()); + doc.addField("reqUser", auditEvent.getUser()); + doc.addField("reqData", auditEvent.getRequestData()); + doc.addField("resource", auditEvent.getResourcePath()); + doc.addField("cliIP", auditEvent.getClientIP()); + doc.addField("logType", auditEvent.getLogType()); + doc.addField("result", auditEvent.getAccessResult()); + doc.addField("policy", auditEvent.getPolicyId()); + doc.addField("repoType", auditEvent.getRepositoryType()); + doc.addField("resType", auditEvent.getResourceType()); + doc.addField("reason", auditEvent.getResultReason()); + doc.addField("action", auditEvent.getAction()); + doc.addField("evtTime", auditEvent.getEventTime()); + doc.addField("tags", auditEvent.getTags()); + doc.addField("datasets", auditEvent.getDatasets()); + doc.addField("projects", auditEvent.getProjects()); + doc.addField("cluster", auditEvent.getClusterName()); + doc.addField("zone", auditEvent.getZoneName()); + doc.addField("agentHost", auditEvent.getAgentHostname()); + + return doc; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java index b226b4e201..32b989981f 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java @@ -19,186 +19,200 @@ package org.apache.ranger.audit.queue; -import java.util.ArrayList; -import java.util.Collection; -import 
java.util.concurrent.LinkedBlockingQueue; - import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.provider.AuditHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.MDC; +import java.util.ArrayList; +import java.util.Collection; +import java.util.concurrent.LinkedBlockingQueue; + /** * This is a non-blocking queue with no limit on capacity. */ public class AuditAsyncQueue extends AuditQueue implements Runnable { - private static final Logger logger = LoggerFactory.getLogger(AuditAsyncQueue.class); - - LinkedBlockingQueue queue = new LinkedBlockingQueue(); - Thread consumerThread = null; - - static final int MAX_DRAIN = 1000; - static int threadCount = 0; - static final String DEFAULT_NAME = "async"; - - public AuditAsyncQueue(AuditHandler consumer) { - super(consumer); - setName(DEFAULT_NAME); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. - * audit.model.AuditEventBase) - */ - @Override - public boolean log(AuditEventBase event) { - logStatusIfRequired(); - - addTotalCount(1); - - // Add to the queue and return ASAP - if (queue.size() >= getMaxQueueSize()) { - addFailedCount(1); - return false; - } - queue.add(event); - return true; - } - - @Override - public boolean log(Collection events) { - boolean ret = true; - for (AuditEventBase event : events) { - ret = log(event); - if (!ret) { - break; - } - } - return ret; - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#start() - */ - @Override - public void start() { - if (consumer != null) { - consumer.start(); - } else { - logger.error("consumer is not set. Nothing will be sent to any consumer. 
name=" - + getName()); - } - - consumerThread = new Thread(this, this.getClass().getName() - + (threadCount++)); - consumerThread.setDaemon(true); - consumerThread.start(); - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#stop() - */ - @Override - public void stop() { - logger.info("Stop called. name=" + getName()); - setDrain(true); - try { - if (consumerThread != null) { - logger.info("Interrupting consumerThread. name=" + getName() - + ", consumer=" - + (consumer == null ? null : consumer.getName())); - consumerThread.interrupt(); - } - } catch (Throwable t) { - // ignore any exception - } - consumerThread = null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Runnable#run() - */ - @Override - public void run() { - try { - //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox - MDC.clear(); - runLogAudit(); - } catch (Throwable t) { - logger.error("Exited thread abnormaly. queue=" + getName(), t); - } - } - - @Override - public void logStatus() { - super.logStatus(); - - if (isStatusLogEnabled()) { - logger.info("AuditAsyncQueue.log(name={}): totalCount={}, currentQueueLength={}", getName(), getTotalCount(), queue.size()); - } - } - - public int size() { return queue.size(); } - - public void runLogAudit() { - while (true) { - try { - AuditEventBase event = null; - if (!isDrain()) { - // For Transfer queue take() is blocking - event = queue.take(); - } else { - // For Transfer queue poll() is non blocking - event = queue.poll(); - } - if (event != null) { - Collection eventList = new ArrayList(); - eventList.add(event); - queue.drainTo(eventList, MAX_DRAIN - 1); - consumer.log(eventList); - - logStatusIfRequired(); - } - } catch (InterruptedException e) { - logger.info("Caught exception in consumer thread. 
Shutdown might be in progress"); - } catch (Throwable t) { - logger.error("Caught error during processing request.", t); - } - if (isDrain()) { - if (queue.isEmpty()) { - break; - } - if (isDrainMaxTimeElapsed()) { - logger.warn("Exiting polling loop because max time allowed reached. name=" - + getName() - + ", waited for " - + (stopTime - System.currentTimeMillis()) + " ms"); - } - } - } - logger.info("Exiting polling loop. name=" + getName()); - - try { - // Call stop on the consumer - logger.info("Calling to stop consumer. name=" + getName() - + ", consumer.name=" + consumer.getName()); - - // Call stop on the consumer - consumer.stop(); - } catch (Throwable t) { - logger.error("Error while calling stop on consumer.", t); - } - logger.info("Exiting consumerThread.run() method. name=" + getName()); - } - + private static final Logger logger = LoggerFactory.getLogger(AuditAsyncQueue.class); + + static final int MAX_DRAIN = 1000; + static final String DEFAULT_NAME = "async"; + static int threadCount; + + LinkedBlockingQueue queue = new LinkedBlockingQueue<>(); + Thread consumerThread; + + public AuditAsyncQueue(AuditHandler consumer) { + super(consumer); + + setName(DEFAULT_NAME); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. 
+ * audit.model.AuditEventBase) + */ + @Override + public boolean log(AuditEventBase event) { + logStatusIfRequired(); + + addTotalCount(1); + + // Add to the queue and return ASAP + if (queue.size() >= getMaxQueueSize()) { + addFailedCount(1); + + return false; + } + + queue.add(event); + + return true; + } + + @Override + public void logStatus() { + super.logStatus(); + + if (isStatusLogEnabled()) { + logger.info("AuditAsyncQueue.log(name={}): totalCount={}, currentQueueLength={}", getName(), getTotalCount(), queue.size()); + } + } + + @Override + public boolean log(Collection events) { + boolean ret = true; + + for (AuditEventBase event : events) { + ret = log(event); + + if (!ret) { + break; + } + } + + return ret; + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#start() + */ + @Override + public void start() { + if (consumer != null) { + consumer.start(); + } else { + logger.error("consumer is not set. Nothing will be sent to any consumer. name={}", getName()); + } + + consumerThread = new Thread(this, this.getClass().getName() + (threadCount++)); + + consumerThread.setDaemon(true); + consumerThread.start(); + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#stop() + */ + @Override + public void stop() { + logger.info("Stop called. name={}", getName()); + + setDrain(true); + + try { + if (consumerThread != null) { + logger.info("Interrupting consumerThread. name={}, consumer={}", getName(), (consumer == null ? null : consumer.getName())); + + consumerThread.interrupt(); + } + } catch (Throwable t) { + // ignore any exception + } + + consumerThread = null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Runnable#run() + */ + @Override + public void run() { + try { + //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox + MDC.clear(); + runLogAudit(); + } catch (Throwable t) { + logger.error("Exited thread abnormaly. 
queue={}", getName(), t); + } + } + + public int size() { + return queue.size(); + } + + public void runLogAudit() { + while (true) { + try { + final AuditEventBase event; + + if (!isDrain()) { + // For Transfer queue take() is blocking + event = queue.take(); + } else { + // For Transfer queue poll() is non blocking + event = queue.poll(); + } + + if (event != null) { + Collection eventList = new ArrayList<>(); + + eventList.add(event); + + queue.drainTo(eventList, MAX_DRAIN - 1); + + consumer.log(eventList); + + logStatusIfRequired(); + } + } catch (InterruptedException e) { + logger.info("Caught exception in consumer thread. Shutdown might be in progress"); + } catch (Throwable t) { + logger.error("Caught error during processing request.", t); + } + + if (isDrain()) { + if (queue.isEmpty()) { + break; + } + + if (isDrainMaxTimeElapsed()) { + logger.warn("Exiting polling loop because max time allowed reached. name={}, waited for {} ms", getName(), (stopTime - System.currentTimeMillis())); + } + } + } + + logger.info("Exiting polling loop. name={}", getName()); + + try { + // Call stop on the consumer + logger.info("Calling to stop consumer. name={}, consumer.name={}", getName(), consumer.getName()); + + // Call stop on the consumer + consumer.stop(); + } catch (Throwable t) { + logger.error("Error while calling stop on consumer.", t); + } + + logger.info("Exiting consumerThread.run() method. 
name={}", getName()); + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java index 103f926566..7b9206cd3d 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java @@ -19,6 +19,12 @@ package org.apache.ranger.audit.queue; +import org.apache.ranger.audit.model.AuditEventBase; +import org.apache.ranger.audit.provider.AuditHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.MDC; + import java.util.ArrayList; import java.util.Collection; import java.util.Properties; @@ -26,329 +32,354 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; -import org.apache.ranger.audit.model.AuditEventBase; -import org.apache.ranger.audit.provider.AuditHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.MDC; - public class AuditBatchQueue extends AuditQueue implements Runnable { - private static final Logger logger = LoggerFactory.getLogger(AuditBatchQueue.class); - - private BlockingQueue queue = null; - private Collection localBatchBuffer = new ArrayList(); - - Thread consumerThread = null; - static int threadCount = 0; - static final String DEFAULT_NAME = "batch"; - - public AuditBatchQueue(AuditHandler consumer) { - super(consumer); - setName(DEFAULT_NAME); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. - * audit.model.AuditEventBase) - */ - @Override - public boolean log(AuditEventBase event) { - try { - // Add to batchQueue. 
Block if full - queue.put(event); - } catch (InterruptedException ex) { - throw new RuntimeException(ex); - } - - return true; - } - - @Override - public boolean log(Collection events) { - boolean ret = true; - for (AuditEventBase event : events) { - ret = log(event); - if (!ret) { - break; - } - } - return ret; - } - - @Override - public void init(Properties prop, String basePropertyName) { - String propPrefix = "xasecure.audit.batch"; - if (basePropertyName != null) { - propPrefix = basePropertyName; - } - - super.init(prop, propPrefix); - - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#start() - */ - @Override - synchronized public void start() { - if (consumerThread != null) { - logger.error("Provider is already started. name=" + getName()); - return; - } - logger.info("Creating ArrayBlockingQueue with maxSize=" - + getMaxQueueSize()); - queue = new ArrayBlockingQueue(getMaxQueueSize()); - - // Start the consumer first - consumer.start(); - - // Then the FileSpooler - if (fileSpoolerEnabled) { - fileSpooler.start(); - } - - // Finally the queue listener - consumerThread = new Thread(this, this.getClass().getName() - + (threadCount++)); - consumerThread.setDaemon(true); - consumerThread.start(); - - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#stop() - */ - @Override - public void stop() { - logger.info("Stop called. name=" + getName()); - setDrain(true); - flush(); - try { - if (consumerThread != null) { - logger.info("Interrupting consumerThread. name=" + getName() - + ", consumer=" - + (consumer == null ? 
null : consumer.getName())); - - consumerThread.interrupt(); - } - } catch (Throwable t) { - // ignore any exception - } - consumerThread = null; - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete() - */ - @Override - public void waitToComplete() { - int defaultTimeOut = -1; - waitToComplete(defaultTimeOut); - consumer.waitToComplete(defaultTimeOut); - } - - @Override - public void waitToComplete(long timeout) { - setDrain(true); - flush(); - long sleepTime = 1000; - long startTime = System.currentTimeMillis(); - int prevQueueSize = -1; - int staticLoopCount = 0; - while ((queue.size() > 0 || localBatchBuffer.size() > 0)) { - if (prevQueueSize == queue.size()) { - logger.error("Queue size is not changing. " + getName() - + ".size=" + queue.size()); - staticLoopCount++; - if (staticLoopCount > 5) { - logger.error("Aborting writing to consumer. Some logs will be discarded." - + getName() + ".size=" + queue.size()); - break; - } - } else { - staticLoopCount = 0; - prevQueueSize = queue.size(); - } - if (consumerThread != null) { - consumerThread.interrupt(); - } - try { - Thread.sleep(sleepTime); - if (timeout > 0 - && (System.currentTimeMillis() - startTime > timeout)) { - break; - } - } catch (InterruptedException e) { - break; - } - } - consumer.waitToComplete(timeout); - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#flush() - */ - @Override - public void flush() { - if (fileSpoolerEnabled) { - fileSpooler.flush(); - } - consumer.flush(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Runnable#run() - */ - @Override - public void run() { - try { - //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox - MDC.clear(); - runLogAudit(); - } catch (Throwable t) { - logger.error("Exited thread abnormaly. 
queue=" + getName(), t); - } - } - - public void runLogAudit() { - long lastDispatchTime = System.currentTimeMillis(); - boolean isDestActive = true; - while (true) { - logStatusIfRequired(); - - // Time to next dispatch - long nextDispatchDuration = lastDispatchTime - - System.currentTimeMillis() + getMaxBatchInterval(); - - boolean isToSpool = false; - boolean fileSpoolDrain = false; - try { - if (fileSpoolerEnabled && fileSpooler.isPending()) { - int percentUsed = queue.size() * 100 / getMaxQueueSize(); - long lastAttemptDelta = fileSpooler.getLastAttemptTimeDelta(); - - fileSpoolDrain = lastAttemptDelta > fileSpoolMaxWaitTime; - // If we should even read from queue? - if (!isDrain() && !fileSpoolDrain && percentUsed < fileSpoolDrainThresholdPercent) { - // Since some files are still under progress and it is - // not in drain mode, lets wait and retry - if (nextDispatchDuration > 0) { - Thread.sleep(nextDispatchDuration); - } - lastDispatchTime = System.currentTimeMillis(); - continue; - } - isToSpool = true; - } - - AuditEventBase event = null; - - if (!isToSpool && !isDrain() && !fileSpoolDrain && nextDispatchDuration > 0) { - event = queue.poll(nextDispatchDuration, TimeUnit.MILLISECONDS); - } else { - // For poll() is non blocking - event = queue.poll(); - } - - if (event != null) { - localBatchBuffer.add(event); - if (getMaxBatchSize() >= localBatchBuffer.size()) { - queue.drainTo(localBatchBuffer, getMaxBatchSize() - localBatchBuffer.size()); - } - } else { - // poll returned due to timeout, so reseting clock - nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + getMaxBatchInterval(); - lastDispatchTime = System.currentTimeMillis(); - } - } catch (InterruptedException e) { - logger.info("Caught exception in consumer thread. 
Shutdown might be in progress"); - setDrain(true); - } catch (Throwable t) { - logger.error("Caught error during processing request.", t); - } - - addTotalCount(localBatchBuffer.size()); - if (localBatchBuffer.size() > 0 && isToSpool) { - // Let spool to the file directly - if (isDestActive) { - logger.info("Switching to file spool. Queue = {}, dest = {}", getName(), consumer.getName()); - } - isDestActive = false; - // Just before stashing - lastDispatchTime = System.currentTimeMillis(); - fileSpooler.stashLogs(localBatchBuffer); - addStashedCount(localBatchBuffer.size()); - localBatchBuffer.clear(); - } else if (localBatchBuffer.size() > 0 && - (isDrain() || localBatchBuffer.size() >= getMaxBatchSize() || nextDispatchDuration <= 0)) { - if (fileSpoolerEnabled && !isDestActive) { - logger.info("Switching to writing to the destination. Queue = {}, dest = {}", - getName(), consumer.getName()); - } - // Reset time just before sending the logs - lastDispatchTime = System.currentTimeMillis(); - boolean ret = consumer.log(localBatchBuffer); - if (!ret) { - if (fileSpoolerEnabled) { - logger.info("Switching to file spool. Queue = {}, dest = {}", getName(), consumer.getName()); - // Transient error. Stash and move on - fileSpooler.stashLogs(localBatchBuffer); - isDestActive = false; - addStashedCount(localBatchBuffer.size()); - } else { - // We need to drop this event - addFailedCount(localBatchBuffer.size()); - logFailedEvent(localBatchBuffer); - } - } else { - isDestActive = true; - addSuccessCount(localBatchBuffer.size()); - } - localBatchBuffer.clear(); - } - - if (isDrain()) { - if (!queue.isEmpty() || localBatchBuffer.size() > 0) { - logger.info("Queue is not empty. Will retry. queue.size = {}, localBatchBuffer.size = {}", - queue.size(), localBatchBuffer.size()); - } else { - break; - } - if (isDrainMaxTimeElapsed()) { - logger.warn("Exiting polling loop because max time allowed reached. 
name=" - + getName() - + ", waited for " - + (stopTime - System.currentTimeMillis()) + " ms"); - } - } - } - - logger.info("Exiting consumerThread. Queue = {}, dest = {}", getName(), consumer.getName()); - try { - // Call stop on the consumer - logger.info("Calling to stop consumer. name = {}, consumer.name = {}", getName(), consumer.getName()); - - consumer.stop(); - if (fileSpoolerEnabled) { - fileSpooler.stop(); - } - } catch (Throwable t) { - logger.error("Error while calling stop on consumer.", t); - } - logStatus(); - logger.info("Exiting consumerThread.run() method. name=" + getName()); - } + private static final Logger logger = LoggerFactory.getLogger(AuditBatchQueue.class); + + static final String DEFAULT_NAME = "batch"; + static int threadCount; + + Thread consumerThread; + + private BlockingQueue queue; + private final Collection localBatchBuffer = new ArrayList<>(); + + public AuditBatchQueue(AuditHandler consumer) { + super(consumer); + + setName(DEFAULT_NAME); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. + * audit.model.AuditEventBase) + */ + @Override + public boolean log(AuditEventBase event) { + try { + // Add to batchQueue. Block if full + queue.put(event); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + + return true; + } + + @Override + public boolean log(Collection events) { + boolean ret = true; + + for (AuditEventBase event : events) { + ret = log(event); + + if (!ret) { + break; + } + } + + return ret; + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#start() + */ + @Override + public synchronized void start() { + if (consumerThread != null) { + logger.error("Provider is already started. 
name={}", getName()); + + return; + } + + logger.info("Creating ArrayBlockingQueue with maxSize={}", getMaxQueueSize()); + + queue = new ArrayBlockingQueue<>(getMaxQueueSize()); + + // Start the consumer first + consumer.start(); + + // Then the FileSpooler + if (fileSpoolerEnabled) { + fileSpooler.start(); + } + + // Finally the queue listener + consumerThread = new Thread(this, this.getClass().getName() + (threadCount++)); + + consumerThread.setDaemon(true); + consumerThread.start(); + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#stop() + */ + @Override + public void stop() { + logger.info("Stop called. name={}", getName()); + + setDrain(true); + flush(); + + try { + if (consumerThread != null) { + logger.info("Interrupting consumerThread. name={}, consumer={}", getName(), (consumer == null ? null : consumer.getName())); + + consumerThread.interrupt(); + } + } catch (Throwable t) { + // ignore any exception + } + + consumerThread = null; + } + + @Override + public void init(Properties prop, String basePropertyName) { + String propPrefix = "xasecure.audit.batch"; + + if (basePropertyName != null) { + propPrefix = basePropertyName; + } + + super.init(prop, propPrefix); + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete() + */ + @Override + public void waitToComplete() { + int defaultTimeOut = -1; + + waitToComplete(defaultTimeOut); + + consumer.waitToComplete(defaultTimeOut); + } + + @Override + public void waitToComplete(long timeout) { + setDrain(true); + flush(); + + long sleepTime = 1000; + long startTime = System.currentTimeMillis(); + int prevQueueSize = -1; + int staticLoopCount = 0; + + while ((!queue.isEmpty() || !localBatchBuffer.isEmpty())) { + if (prevQueueSize == queue.size()) { + logger.error("Queue size is not changing. {}.size={}", getName(), queue.size()); + + staticLoopCount++; + + if (staticLoopCount > 5) { + logger.error("Aborting writing to consumer. 
Some logs will be discarded.{}.size={}", getName(), queue.size()); + + break; + } + } else { + staticLoopCount = 0; + prevQueueSize = queue.size(); + } + + if (consumerThread != null) { + consumerThread.interrupt(); + } + + try { + Thread.sleep(sleepTime); + + if (timeout > 0 && (System.currentTimeMillis() - startTime > timeout)) { + break; + } + } catch (InterruptedException e) { + break; + } + } + + consumer.waitToComplete(timeout); + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#flush() + */ + @Override + public void flush() { + if (fileSpoolerEnabled) { + fileSpooler.flush(); + } + + consumer.flush(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Runnable#run() + */ + @Override + public void run() { + try { + //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox + MDC.clear(); + runLogAudit(); + } catch (Throwable t) { + logger.error("Exited thread abnormaly. queue={}", getName(), t); + } + } + + public void runLogAudit() { + long lastDispatchTime = System.currentTimeMillis(); + boolean isDestActive = true; + + while (true) { + logStatusIfRequired(); + + // Time to next dispatch + long nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + getMaxBatchInterval(); + boolean isToSpool = false; + boolean fileSpoolDrain = false; + + try { + if (fileSpoolerEnabled && fileSpooler.isPending()) { + int percentUsed = queue.size() * 100 / getMaxQueueSize(); + long lastAttemptDelta = fileSpooler.getLastAttemptTimeDelta(); + + fileSpoolDrain = lastAttemptDelta > fileSpoolMaxWaitTime; + + // If we should even read from queue? 
+ if (!isDrain() && !fileSpoolDrain && percentUsed < fileSpoolDrainThresholdPercent) { + // Since some files are still under progress and it is + // not in drain mode, lets wait and retry + if (nextDispatchDuration > 0) { + Thread.sleep(nextDispatchDuration); + } + + lastDispatchTime = System.currentTimeMillis(); + + continue; + } + + isToSpool = true; + } + + final AuditEventBase event; + + if (!isToSpool && !isDrain() && !fileSpoolDrain && nextDispatchDuration > 0) { + event = queue.poll(nextDispatchDuration, TimeUnit.MILLISECONDS); + } else { + // For poll() is non blocking + event = queue.poll(); + } + + if (event != null) { + localBatchBuffer.add(event); + if (getMaxBatchSize() >= localBatchBuffer.size()) { + queue.drainTo(localBatchBuffer, getMaxBatchSize() - localBatchBuffer.size()); + } + } else { + // poll returned due to timeout, so reseting clock + nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + getMaxBatchInterval(); + lastDispatchTime = System.currentTimeMillis(); + } + } catch (InterruptedException e) { + logger.info("Caught exception in consumer thread. Shutdown might be in progress"); + + setDrain(true); + } catch (Throwable t) { + logger.error("Caught error during processing request.", t); + } + + addTotalCount(localBatchBuffer.size()); + if (!localBatchBuffer.isEmpty() && isToSpool) { + // Let spool to the file directly + if (isDestActive) { + logger.info("Switching to file spool. Queue = {}, dest = {}", getName(), consumer.getName()); + } + + isDestActive = false; + // Just before stashing + lastDispatchTime = System.currentTimeMillis(); + + fileSpooler.stashLogs(localBatchBuffer); + addStashedCount(localBatchBuffer.size()); + + localBatchBuffer.clear(); + } else if (!localBatchBuffer.isEmpty() && (isDrain() || localBatchBuffer.size() >= getMaxBatchSize() || nextDispatchDuration <= 0)) { + if (fileSpoolerEnabled && !isDestActive) { + logger.info("Switching to writing to the destination. 
Queue = {}, dest = {}", getName(), consumer.getName()); + } + + // Reset time just before sending the logs + lastDispatchTime = System.currentTimeMillis(); + + boolean ret = consumer.log(localBatchBuffer); + + if (!ret) { + if (fileSpoolerEnabled) { + logger.info("Switching to file spool. Queue = {}, dest = {}", getName(), consumer.getName()); + + // Transient error. Stash and move on + fileSpooler.stashLogs(localBatchBuffer); + + isDestActive = false; + + addStashedCount(localBatchBuffer.size()); + } else { + // We need to drop this event + addFailedCount(localBatchBuffer.size()); + logFailedEvent(localBatchBuffer); + } + } else { + isDestActive = true; + + addSuccessCount(localBatchBuffer.size()); + } + + localBatchBuffer.clear(); + } + + if (isDrain()) { + if (!queue.isEmpty() || !localBatchBuffer.isEmpty()) { + logger.info("Queue is not empty. Will retry. queue.size = {}, localBatchBuffer.size = {}", queue.size(), localBatchBuffer.size()); + } else { + break; + } + + if (isDrainMaxTimeElapsed()) { + logger.warn("Exiting polling loop because max time allowed reached. name={}, waited for {} ms", getName(), stopTime - System.currentTimeMillis()); + } + } + } + + logger.info("Exiting consumerThread. Queue = {}, dest = {}", getName(), consumer.getName()); + + try { + // Call stop on the consumer + logger.info("Calling to stop consumer. name = {}, consumer.name = {}", getName(), consumer.getName()); + + consumer.stop(); + + if (fileSpoolerEnabled) { + fileSpooler.stop(); + } + } catch (Throwable t) { + logger.error("Error while calling stop on consumer.", t); + } + + logStatus(); + + logger.info("Exiting consumerThread.run() method. 
name={}", getName()); + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java index c61d99af58..61601befe6 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java @@ -20,8 +20,8 @@ package org.apache.ranger.audit.queue; import org.apache.ranger.audit.model.AuditEventBase; -import org.apache.ranger.audit.provider.AuditHandler; import org.apache.ranger.audit.model.AuthzAuditEvent; +import org.apache.ranger.audit.provider.AuditHandler; import org.apache.ranger.audit.provider.MiscUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,15 +30,14 @@ import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; -import java.io.FileFilter; import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.Date; @@ -58,60 +57,50 @@ public class AuditFileCacheProviderSpool implements Runnable { private static final Logger logger = LoggerFactory.getLogger(AuditFileCacheProviderSpool.class); - public enum SPOOL_FILE_STATUS { - pending, write_inprogress, read_inprogress, done - } - - public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir"; - public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format"; - public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir"; - public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files"; - public static final String 
PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix"; - public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec"; - public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename"; - public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms"; - public static final String PROP_FILE_SPOOL_BATCH_SIZE = "filespool.buffer.size"; - + public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir"; + public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format"; + public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir"; + public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files"; + public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix"; + public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec"; + public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename"; + public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms"; + public static final String PROP_FILE_SPOOL_BATCH_SIZE = "filespool.buffer.size"; public static final String AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP = "xasecure.audit.provider.filecache.is.enabled"; - public static final String FILE_CACHE_PROVIDER_NAME = "AuditFileCacheProviderSpool"; - - AuditHandler consumerProvider = null; + public static final String FILE_CACHE_PROVIDER_NAME = "AuditFileCacheProviderSpool"; - BlockingQueue indexQueue = new LinkedBlockingQueue(); - List indexRecords = new ArrayList(); + AuditHandler consumerProvider; + BlockingQueue indexQueue = new LinkedBlockingQueue<>(); + List indexRecords = new ArrayList<>(); // Folder and File attributes - File logFolder = null; - String logFileNameFormat = null; - File archiveFolder = null; - String fileNamePrefix = null; - String indexFileName = null; - File indexFile = null; - String 
indexDoneFileName = null; - File indexDoneFile = null; - int retryDestinationMS = 30 * 1000; // Default 30 seconds - int fileRolloverSec = 24 * 60 * 60; // In seconds - int maxArchiveFiles = 100; - int errorLogIntervalMS = 30 * 1000; // Every 30 seconds - int auditBatchSize = 1000; - long lastErrorLogMS = 0; - boolean isAuditFileCacheProviderEnabled = false; - boolean closeFile = false; - boolean isPending = false; - long lastAttemptTime = 0; - boolean initDone = false; - - PrintWriter logWriter = null; - AuditIndexRecord currentWriterIndexRecord = null; - AuditIndexRecord currentConsumerIndexRecord = null; - - BufferedReader logReader = null; - Thread destinationThread = null; - - boolean isWriting = true; - boolean isDrain = false; - boolean isDestDown = false; - boolean isSpoolingSuccessful = true; + File logFolder; + String logFileNameFormat; + File archiveFolder; + String fileNamePrefix; + String indexFileName; + File indexFile; + String indexDoneFileName; + File indexDoneFile; + long lastErrorLogMS; + boolean isAuditFileCacheProviderEnabled; + boolean closeFile; + boolean isPending; + long lastAttemptTime; + boolean initDone; + PrintWriter logWriter; + AuditIndexRecord currentWriterIndexRecord; + AuditIndexRecord currentConsumerIndexRecord; + Thread destinationThread; + boolean isDrain; + boolean isDestDown; + int retryDestinationMS = 30 * 1000; // Default 30 seconds + int fileRolloverSec = 24 * 60 * 60; // In seconds + int maxArchiveFiles = 100; + int errorLogIntervalMS = 30 * 1000; // Every 30 seconds + int auditBatchSize = 1000; + boolean isWriting = true; + boolean isSpoolingSuccessful = true; public AuditFileCacheProviderSpool(AuditHandler consumerProvider) { this.consumerProvider = consumerProvider; @@ -125,129 +114,122 @@ public boolean init(Properties props, String basePropertyName) { logger.debug("==> AuditFileCacheProviderSpool.init()"); if (initDone) { - logger.error("init() called more than once. 
queueProvider=" - + "" + ", consumerProvider=" - + consumerProvider.getName()); + logger.error("init() called more than once. queueProvider=, consumerProvider={}", consumerProvider.getName()); + return true; } + String propPrefix = "xasecure.audit.filespool"; + if (basePropertyName != null) { propPrefix = basePropertyName; } try { // Initial folder and file properties - String logFolderProp = MiscUtil.getStringProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_LOCAL_DIR); - logFileNameFormat = MiscUtil.getStringProperty(props, - basePropertyName + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME); - String archiveFolderProp = MiscUtil.getStringProperty(props, - propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR); - fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_FILENAME_PREFIX); - indexFileName = MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_INDEX_FILE); - retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS); - fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec); - maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles); + String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_LOCAL_DIR); + String archiveFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR); + + logFileNameFormat = MiscUtil.getStringProperty(props, basePropertyName + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME); + + fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_FILENAME_PREFIX); + indexFileName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_INDEX_FILE); + retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix + "." 
+ PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS); + fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec); + maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles); isAuditFileCacheProviderEnabled = MiscUtil.getBooleanProperty(props, AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP, false); - logger.info("retryDestinationMS=" + retryDestinationMS - + ", queueName=" + FILE_CACHE_PROVIDER_NAME); - logger.info("fileRolloverSec=" + fileRolloverSec + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); - logger.info("maxArchiveFiles=" + maxArchiveFiles + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); + + logger.info("retryDestinationMS={}, queueName={}", retryDestinationMS, FILE_CACHE_PROVIDER_NAME); + logger.info("fileRolloverSec={}, queueName={}", fileRolloverSec, FILE_CACHE_PROVIDER_NAME); + logger.info("maxArchiveFiles={}, queueName={}", maxArchiveFiles, FILE_CACHE_PROVIDER_NAME); if (logFolderProp == null || logFolderProp.isEmpty()) { - logger.error("Audit spool folder is not configured. Please set " - + propPrefix - + "." - + PROP_FILE_SPOOL_LOCAL_DIR - + ". queueName=" + FILE_CACHE_PROVIDER_NAME); + logger.error("Audit spool folder is not configured. Please set {}.{}.queueName={}", propPrefix, PROP_FILE_SPOOL_LOCAL_DIR, FILE_CACHE_PROVIDER_NAME); + return false; } + logFolder = new File(logFolderProp); + if (!logFolder.isDirectory()) { boolean result = logFolder.mkdirs(); + if (!logFolder.isDirectory() || !result) { - logger.error("File Spool folder not found and can't be created. folder=" - + logFolder.getAbsolutePath() - + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); + logger.error("File Spool folder not found and can't be created. 
folder={}, queueName={}", logFolder.getAbsolutePath(), FILE_CACHE_PROVIDER_NAME); + return false; } } - logger.info("logFolder=" + logFolder + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); + + logger.info("logFolder={}, queueName={}", logFolder, FILE_CACHE_PROVIDER_NAME); if (logFileNameFormat == null || logFileNameFormat.isEmpty()) { - logFileNameFormat = "spool_" + "%app-type%" + "_" - + "%time:yyyyMMdd-HHmm.ss%.log"; + logFileNameFormat = "spool_" + "%app-type%" + "_" + "%time:yyyyMMdd-HHmm.ss%.log"; } - logger.info("logFileNameFormat=" + logFileNameFormat - + ", queueName=" + FILE_CACHE_PROVIDER_NAME); + + logger.info("logFileNameFormat={}, queueName={}", logFileNameFormat, FILE_CACHE_PROVIDER_NAME); if (archiveFolderProp == null || archiveFolderProp.isEmpty()) { archiveFolder = new File(logFolder, "archive"); } else { archiveFolder = new File(archiveFolderProp); } + if (!archiveFolder.isDirectory()) { boolean result = archiveFolder.mkdirs(); + if (!archiveFolder.isDirectory() || !result) { - logger.error("File Spool archive folder not found and can't be created. folder=" - + archiveFolder.getAbsolutePath() - + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); + logger.error("File Spool archive folder not found and can't be created. 
folder={}, queueName={}", archiveFolder.getAbsolutePath(), FILE_CACHE_PROVIDER_NAME); + return false; } } - logger.info("archiveFolder=" + archiveFolder + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); + + logger.info("archiveFolder={}, queueName={}", archiveFolder, FILE_CACHE_PROVIDER_NAME); if (indexFileName == null || indexFileName.isEmpty()) { if (fileNamePrefix == null || fileNamePrefix.isEmpty()) { - fileNamePrefix = FILE_CACHE_PROVIDER_NAME + "_" - + consumerProvider.getName(); + fileNamePrefix = FILE_CACHE_PROVIDER_NAME + "_" + consumerProvider.getName(); } - indexFileName = "index_" + fileNamePrefix + "_" + "%app-type%" - + ".json"; - indexFileName = MiscUtil.replaceTokens(indexFileName, - System.currentTimeMillis()); + + indexFileName = "index_" + fileNamePrefix + "_" + "%app-type%" + ".json"; + indexFileName = MiscUtil.replaceTokens(indexFileName, System.currentTimeMillis()); } indexFile = new File(logFolder, indexFileName); + if (!indexFile.exists()) { boolean ret = indexFile.createNewFile(); + if (!ret) { - logger.error("Error creating index file. fileName=" - + indexFile.getPath()); + logger.error("Error creating index file. fileName={}", indexFile.getPath()); + return false; } } - logger.info("indexFile=" + indexFile + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); + + logger.info("indexFile={}, queueName={}", indexFile, FILE_CACHE_PROVIDER_NAME); int lastDot = indexFileName.lastIndexOf('.'); + if (lastDot < 0) { lastDot = indexFileName.length() - 1; } - indexDoneFileName = indexFileName.substring(0, lastDot) - + "_closed.json"; - indexDoneFile = new File(logFolder, indexDoneFileName); + + indexDoneFileName = indexFileName.substring(0, lastDot) + "_closed.json"; + indexDoneFile = new File(logFolder, indexDoneFileName); + if (!indexDoneFile.exists()) { boolean ret = indexDoneFile.createNewFile(); + if (!ret) { - logger.error("Error creating index done file. fileName=" - + indexDoneFile.getPath()); + logger.error("Error creating index done file. 
fileName={}", indexDoneFile.getPath()); + return false; } } - logger.info("indexDoneFile=" + indexDoneFile + ", queueName=" - + FILE_CACHE_PROVIDER_NAME); + + logger.info("indexDoneFile={}, queueName={}", indexDoneFile, FILE_CACHE_PROVIDER_NAME); // Load index file loadIndexFile(); @@ -255,42 +237,39 @@ public boolean init(Properties props, String basePropertyName) { if (!auditIndexRecord.status.equals(SPOOL_FILE_STATUS.done)) { isPending = true; } - if (auditIndexRecord.status - .equals(SPOOL_FILE_STATUS.write_inprogress)) { + + if (auditIndexRecord.status.equals(SPOOL_FILE_STATUS.write_inprogress)) { currentWriterIndexRecord = auditIndexRecord; - logger.info("currentWriterIndexRecord=" - + currentWriterIndexRecord.filePath - + ", queueName=" + FILE_CACHE_PROVIDER_NAME); + + logger.info("currentWriterIndexRecord={}, queueName={}", currentWriterIndexRecord.filePath, FILE_CACHE_PROVIDER_NAME); } - if (auditIndexRecord.status - .equals(SPOOL_FILE_STATUS.read_inprogress)) { + + if (auditIndexRecord.status.equals(SPOOL_FILE_STATUS.read_inprogress)) { indexQueue.add(auditIndexRecord); } } + printIndex(); - for (int i = 0; i < indexRecords.size(); i++) { - AuditIndexRecord auditIndexRecord = indexRecords.get(i); + + for (AuditIndexRecord auditIndexRecord : indexRecords) { if (auditIndexRecord.status.equals(SPOOL_FILE_STATUS.pending)) { File consumerFile = new File(auditIndexRecord.filePath); + if (!consumerFile.exists()) { - logger.error("INIT: Consumer file=" - + consumerFile.getPath() + " not found."); + logger.error("INIT: Consumer file={} not found.", consumerFile.getPath()); } else { indexQueue.add(auditIndexRecord); } } } - } catch (Throwable t) { - logger.error("Error initializing File Spooler. queue=" - + FILE_CACHE_PROVIDER_NAME, t); + logger.error("Error initializing File Spooler. queue={}", FILE_CACHE_PROVIDER_NAME, t); + return false; } - auditBatchSize = MiscUtil.getIntProperty(props, propPrefix - + "." 
+ PROP_FILE_SPOOL_BATCH_SIZE, auditBatchSize); - - initDone = true; + auditBatchSize = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_BATCH_SIZE, auditBatchSize); + initDone = true; logger.debug("<== AuditFileCacheProviderSpool.init()"); return true; @@ -301,35 +280,35 @@ public boolean init(Properties props, String basePropertyName) { */ public void start() { if (!initDone) { - logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName=" - + FILE_CACHE_PROVIDER_NAME); + logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName={}", FILE_CACHE_PROVIDER_NAME); + return; } - logger.info("Starting writerThread, queueName=" - + FILE_CACHE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); + logger.info("Starting writerThread, queueName={}, consumer={}", FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); // Let's start the thread to read - destinationThread = new Thread(this, FILE_CACHE_PROVIDER_NAME + "_" - + consumerProvider.getName() + "_destWriter"); + destinationThread = new Thread(this, FILE_CACHE_PROVIDER_NAME + "_" + consumerProvider.getName() + "_destWriter"); + destinationThread.setDaemon(true); destinationThread.start(); } public void stop() { if (!initDone) { - logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName=" - + FILE_CACHE_PROVIDER_NAME); + logger.error("Cannot stop Audit File Spooler. Initilization not done. 
queueName={}", FILE_CACHE_PROVIDER_NAME); + return; } - logger.info("Stop called, queueName=" + FILE_CACHE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + + logger.info("Stop called, queueName={}, consumer={}", FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); isDrain = true; + flush(); PrintWriter out = getOpenLogFileStream(); + if (out != null) { // If write is still going on, then let's give it enough time to // complete @@ -340,25 +319,28 @@ public void stop() { } catch (InterruptedException e) { // ignore } + continue; } + try { - logger.info("Closing open file, queueName=" - + FILE_CACHE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); + logger.info("Closing open file, queueName={}, consumer={}", FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); out.flush(); out.close(); + break; } catch (Throwable t) { logger.debug("Error closing spool out file.", t); } } } + try { if (destinationThread != null) { destinationThread.interrupt(); } + destinationThread = null; } catch (Throwable e) { // ignore @@ -367,11 +349,13 @@ public void stop() { public void flush() { if (!initDone) { - logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName=" - + FILE_CACHE_PROVIDER_NAME); + logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName={}", FILE_CACHE_PROVIDER_NAME); + return; } + PrintWriter out = getOpenLogFileStream(); + if (out != null) { out.flush(); } @@ -385,8 +369,8 @@ public void flush() { */ public boolean isPending() { if (!initDone) { - logError("isPending(): File Spooler not initialized. queueName=" - + FILE_CACHE_PROVIDER_NAME); + logError("isPending(): File Spooler not initialized. 
queueName=" + FILE_CACHE_PROVIDER_NAME); + return false; } @@ -402,179 +386,210 @@ public long getLastAttemptTimeDelta() { if (lastAttemptTime == 0) { return 0; } + return System.currentTimeMillis() - lastAttemptTime; } - synchronized public void stashLogs(AuditEventBase event) { - + public synchronized void stashLogs(AuditEventBase event) { if (isDrain) { // Stop has been called, so this method shouldn't be called - logger.error("stashLogs() is called after stop is called. event=" - + event); + logger.error("stashLogs() is called after stop is called. event={}", event); + return; } + try { isWriting = true; - PrintWriter logOut = getLogFileStream(); - // Convert event to json - String jsonStr = MiscUtil.stringify(event); + + PrintWriter logOut = getLogFileStream(); + String jsonStr = MiscUtil.stringify(event); // Convert event to json + logOut.println(jsonStr); logOut.flush(); - isPending = true; + + isPending = true; isSpoolingSuccessful = true; - } catch (Throwable t) { + } catch (Throwable t) { isSpoolingSuccessful = false; - logger.error("Error writing to file. event=" + event, t); + + logger.error("Error writing to file. event={}", event, t); } finally { isWriting = false; } - } - synchronized public void stashLogs(Collection events) { + public synchronized void stashLogs(Collection events) { for (AuditEventBase event : events) { stashLogs(event); } + flush(); } - synchronized public void stashLogsString(String event) { + public synchronized void stashLogsString(String event) { if (isDrain) { // Stop has been called, so this method shouldn't be called - logger.error("stashLogs() is called after stop is called. event=" - + event); + logger.error("stashLogs() is called after stop is called. event={}", event); + return; } + try { isWriting = true; + PrintWriter logOut = getLogFileStream(); + logOut.println(event); } catch (Exception ex) { - logger.error("Error writing to file. event=" + event, ex); + logger.error("Error writing to file. 
event={}", event, ex); } finally { isWriting = false; } - } - synchronized public boolean isSpoolingSuccessful() { + public synchronized boolean isSpoolingSuccessful() { return isSpoolingSuccessful; } - synchronized public void stashLogsString(Collection events) { + public synchronized void stashLogsString(Collection events) { for (String event : events) { stashLogsString(event); } + flush(); } - /** - * This return the current file. If there are not current open output file, - * then it will return null + /* + * (non-Javadoc) * - * @return - * @throws Exception + * @see java.lang.Runnable#run() */ - synchronized private PrintWriter getOpenLogFileStream() { - return logWriter; + @Override + public void run() { + try { + //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox + MDC.clear(); + runLogAudit(); + } catch (Throwable t) { + logger.error("Exited thread without abnormaly. queue={}", consumerProvider.getName(), t); + } } - /** - * @return - * @throws Exception - */ - synchronized private PrintWriter getLogFileStream() throws Exception { - closeFileIfNeeded(); - // Either there are no open log file or the previous one has been rolled - // over - if (currentWriterIndexRecord == null) { - Date currentTime = new Date(); - // Create a new file - String fileName = MiscUtil.replaceTokens(logFileNameFormat, - currentTime.getTime()); - String newFileName = fileName; - File outLogFile = null; - int i = 0; - while (true) { - outLogFile = new File(logFolder, newFileName); - File archiveLogFile = new File(archiveFolder, newFileName); - if (!outLogFile.exists() && !archiveLogFile.exists()) { + public void runLogAudit() { + // boolean isResumed = false; + while (true) { + try { + if (isDestDown) { + logger.info("Destination is down. sleeping for {} milli seconds. 
indexQueue={}, queueName={}, consumer={}", retryDestinationMS, indexQueue.size(), FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); + + Thread.sleep(retryDestinationMS); + } + + // Let's pause between each iteration + if (currentConsumerIndexRecord == null) { + currentConsumerIndexRecord = indexQueue.poll(retryDestinationMS, TimeUnit.MILLISECONDS); + } else { + Thread.sleep(retryDestinationMS); + } + + if (isDrain) { + // Need to exit break; } - i++; - int lastDot = fileName.lastIndexOf('.'); - String baseName = fileName.substring(0, lastDot); - String extension = fileName.substring(lastDot); - newFileName = baseName + "." + i + extension; - } - fileName = newFileName; - logger.info("Creating new file. queueName=" - + FILE_CACHE_PROVIDER_NAME + ", fileName=" + fileName); - // Open the file - logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream( - outLogFile),"UTF-8"))); - AuditIndexRecord tmpIndexRecord = new AuditIndexRecord(); + if (currentConsumerIndexRecord == null) { + closeFileIfNeeded(); - tmpIndexRecord.id = MiscUtil.generateUniqueId(); - tmpIndexRecord.filePath = outLogFile.getPath(); - tmpIndexRecord.status = SPOOL_FILE_STATUS.write_inprogress; - tmpIndexRecord.fileCreateTime = currentTime; - tmpIndexRecord.lastAttempt = true; - currentWriterIndexRecord = tmpIndexRecord; - indexRecords.add(currentWriterIndexRecord); - saveIndexFile(); + continue; + } - } else { - if (logWriter == null) { - // This means the process just started. We need to open the file - // in append mode. - logger.info("Opening existing file for append. 
queueName=" - + FILE_CACHE_PROVIDER_NAME + ", fileName=" - + currentWriterIndexRecord.filePath); - logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream( - currentWriterIndexRecord.filePath, true),"UTF-8"))); - } - } - return logWriter; - } + boolean isRemoveIndex = false; + File consumerFile = new File(currentConsumerIndexRecord.filePath); - synchronized private void closeFileIfNeeded() throws FileNotFoundException, - IOException { - // Is there file open to write or there are no pending file, then close - // the active file - if (currentWriterIndexRecord != null) { - // Check whether the file needs to rolled - rollOverSpoolFileByTime(); + if (!consumerFile.exists()) { + logger.error("Consumer file={} not found.", consumerFile.getPath()); - if (closeFile) { - // Roll the file - if (logWriter != null) { - logWriter.flush(); - logWriter.close(); - logWriter = null; - closeFile = false; + printIndex(); + + isRemoveIndex = true; + } else { + // Let's open the file to write + try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(currentConsumerIndexRecord.filePath), StandardCharsets.UTF_8))) { + int startLine = currentConsumerIndexRecord.linePosition; + int currLine = 0; + List events = new ArrayList<>(); + + for (String line = br.readLine(); line != null; line = br.readLine()) { + currLine++; + + if (currLine < startLine) { + continue; + } + + AuditEventBase event = MiscUtil.fromJson(line, AuthzAuditEvent.class); + + events.add(event); + + if (events.size() == auditBatchSize) { + boolean ret = sendEvent(events, currentConsumerIndexRecord, currLine); + + if (!ret) { + throw new Exception("Destination down"); + } + + events.clear(); + } + } + + if (!events.isEmpty()) { + boolean ret = sendEvent(events, currentConsumerIndexRecord, currLine); + + if (!ret) { + throw new Exception("Destination down"); + } + + events.clear(); + } + + logger.info("Done reading file. 
file={}, queueName={}, consumer={}", currentConsumerIndexRecord.filePath, FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); + + // The entire file is read + currentConsumerIndexRecord.status = SPOOL_FILE_STATUS.done; + currentConsumerIndexRecord.doneCompleteTime = new Date(); + currentConsumerIndexRecord.lastAttempt = true; + + isRemoveIndex = true; + } catch (Exception ex) { + isDestDown = true; + + logError("Destination down. queueName=" + FILE_CACHE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName()); + + lastAttemptTime = System.currentTimeMillis(); + // Update the index file + currentConsumerIndexRecord.lastFailedTime = new Date(); + currentConsumerIndexRecord.failedAttemptCount++; + currentConsumerIndexRecord.lastAttempt = false; + + saveIndexFile(); + } } - currentWriterIndexRecord.status = SPOOL_FILE_STATUS.pending; - currentWriterIndexRecord.writeCompleteTime = new Date(); - saveIndexFile(); - logger.info("Adding file to queue. queueName=" - + FILE_CACHE_PROVIDER_NAME + ", fileName=" - + currentWriterIndexRecord.filePath); - indexQueue.add(currentWriterIndexRecord); - currentWriterIndexRecord = null; + + if (isRemoveIndex) { + // Remove this entry from index + removeIndexRecord(currentConsumerIndexRecord); + + currentConsumerIndexRecord = null; + + closeFileIfNeeded(); + } + } catch (InterruptedException e) { + logger.info("Caught exception in consumer thread. Shutdown might be in progress"); + } catch (Throwable t) { + logger.error("Exception in destination writing thread.", t); } } - } - private void rollOverSpoolFileByTime() { - if (System.currentTimeMillis() - - currentWriterIndexRecord.fileCreateTime.getTime() > fileRolloverSec * 1000) { - closeFile = true; - logger.info("Closing file. Rolling over. queueName=" - + FILE_CACHE_PROVIDER_NAME + ", fileName=" - + currentWriterIndexRecord.filePath); - } + logger.info("Exiting file spooler. 
provider={}, consumer={}", FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); } /** @@ -583,360 +598,342 @@ private void rollOverSpoolFileByTime() { * @throws IOException */ void loadIndexFile() throws IOException { - logger.info("Loading index file. fileName=" + indexFile.getPath()); - BufferedReader br = null; - try { - br = new BufferedReader(new InputStreamReader(new FileInputStream(indexFile), "UTF-8")); + logger.info("Loading index file. fileName={}", indexFile.getPath()); + + try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(indexFile), StandardCharsets.UTF_8))) { indexRecords.clear(); - String line; - while ((line = br.readLine()) != null) { + + for (String line = br.readLine(); line != null; line = br.readLine()) { if (!line.isEmpty() && !line.startsWith("#")) { - try { - AuditIndexRecord record = MiscUtil.fromJson(line, - AuditIndexRecord.class); - indexRecords.add(record); - } catch (Exception e) { - logger.error("Error parsing following JSON: "+line, e); - } + try { + AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class); + + indexRecords.add(record); + } catch (Exception e) { + logger.error("Error parsing following JSON: {}", line, e); + } } } - } finally { - if (br!= null) { - br.close(); - } } } synchronized void printIndex() { logger.info("INDEX printIndex() ==== START"); - Iterator iter = indexRecords.iterator(); - while (iter.hasNext()) { - AuditIndexRecord record = iter.next(); - logger.info("INDEX=" + record + ", isFileExist=" - + (new File(record.filePath).exists())); + + for (AuditIndexRecord record : indexRecords) { + logger.info("INDEX={}, isFileExist={}", record, (new File(record.filePath).exists())); } + logger.info("INDEX printIndex() ==== END"); } - synchronized void removeIndexRecord(AuditIndexRecord indexRecord) - throws FileNotFoundException, IOException { + synchronized void removeIndexRecord(AuditIndexRecord indexRecord) throws IOException { Iterator iter = 
indexRecords.iterator(); + while (iter.hasNext()) { AuditIndexRecord record = iter.next(); + if (record.id.equals(indexRecord.id)) { - logger.info("Removing file from index. file=" + record.filePath - + ", queueName=" + FILE_CACHE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + logger.info("Removing file from index. file={}, queueName={}, consumer={}", record.filePath, FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); iter.remove(); + appendToDoneFile(record); } } + saveIndexFile(); - // If there are no more files in the index, then let's assume the - // destination is now available - if (indexRecords.size() == 0) { + + // If there are no more files in the index, then let's assume the destination is now available + if (indexRecords.isEmpty()) { isPending = false; } } - synchronized void saveIndexFile() throws FileNotFoundException, IOException { - PrintWriter out = new PrintWriter(indexFile,"UTF-8"); + synchronized void saveIndexFile() throws IOException { + PrintWriter out = new PrintWriter(indexFile, StandardCharsets.UTF_8.name()); + for (AuditIndexRecord auditIndexRecord : indexRecords) { out.println(MiscUtil.stringify(auditIndexRecord)); } + out.close(); // printIndex(); - } - void appendToDoneFile(AuditIndexRecord indexRecord) - throws FileNotFoundException, IOException { - logger.info("Moving to done file. " + indexRecord.filePath - + ", queueName=" + FILE_CACHE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); - String line = MiscUtil.stringify(indexRecord); - PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream( - indexDoneFile, true),"UTF-8"))); - out.println(line); - out.flush(); - out.close(); + void appendToDoneFile(AuditIndexRecord indexRecord) throws IOException { + logger.info("Moving to done file. 
{}, queueName={}, consumer={}", indexRecord.filePath, FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); + + try (PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(indexDoneFile, true), StandardCharsets.UTF_8)))) { + String line = MiscUtil.stringify(indexRecord); + + out.println(line); + out.flush(); + } // After Each file is read and audit events are pushed into pipe, we flush to reach the destination immediate. consumerProvider.flush(); // Move to archive folder - File logFile = null; + File logFile = null; File archiveFile = null; + try { - logFile = new File(indexRecord.filePath); - String fileName = logFile.getName(); - archiveFile = new File(archiveFolder, fileName); - logger.info("Moving logFile " + logFile + " to " + archiveFile); + logFile = new File(indexRecord.filePath); + archiveFile = new File(archiveFolder, logFile.getName()); + + logger.info("Moving logFile {} to {}", logFile, archiveFile); + boolean result = logFile.renameTo(archiveFile); + if (!result) { - logger.error("Error moving log file to archive folder. Unable to rename" - + logFile + " to archiveFile=" + archiveFile); + logger.error("Error moving log file to archive folder. Unable to rename {} to archiveFile={}", logFile, archiveFile); } } catch (Throwable t) { - logger.error("Error moving log file to archive folder. logFile=" - + logFile + ", archiveFile=" + archiveFile, t); + logger.error("Error moving log file to archive folder. 
logFile={}, archiveFile={}", logFile, archiveFile, t); } // After archiving the file flush the pipe consumerProvider.flush(); archiveFile = null; + try { // Remove old files - File[] logFiles = archiveFolder.listFiles(new FileFilter() { - public boolean accept(File pathname) { - return pathname.getName().toLowerCase().endsWith(".log"); - } - }); + File[] logFiles = archiveFolder.listFiles(pathname -> pathname.getName().toLowerCase().endsWith(".log")); if (logFiles != null && logFiles.length > maxArchiveFiles) { int filesToDelete = logFiles.length - maxArchiveFiles; - BufferedReader br = new BufferedReader(new FileReader( - indexDoneFile)); - try { + + try (BufferedReader br = new BufferedReader(new FileReader(indexDoneFile))) { int filesDeletedCount = 0; - while ((line = br.readLine()) != null) { + + for (String line = br.readLine(); line != null; line = br.readLine()) { if (!line.isEmpty() && !line.startsWith("#")) { - try { - AuditIndexRecord record = MiscUtil.fromJson(line, - AuditIndexRecord.class); - logFile = new File(record.filePath); - String fileName = logFile.getName(); - archiveFile = new File(archiveFolder, fileName); - if (archiveFile.exists()) { - logger.info("Deleting archive file " - + archiveFile); - boolean ret = archiveFile.delete(); - if (!ret) { - logger.error("Error deleting archive file. archiveFile=" - + archiveFile); + try { + AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class); + + if (record == null) { + logger.warn("failed to parse index record: {}", line); + continue; } - filesDeletedCount++; - if (filesDeletedCount >= filesToDelete) { - logger.info("Deleted " + filesDeletedCount - + " files"); - break; + + logFile = new File(record.filePath); + archiveFile = new File(archiveFolder, logFile.getName()); + + if (archiveFile.exists()) { + logger.info("Deleting archive file {}", archiveFile); + + boolean ret = archiveFile.delete(); + + if (!ret) { + logger.error("Error deleting archive file. 
archiveFile={}", archiveFile); + } + + filesDeletedCount++; + + if (filesDeletedCount >= filesToDelete) { + logger.info("Deleted {} files", filesDeletedCount); + + break; + } } + } catch (Exception e) { + logger.error("Error parsing following JSON: {}", line, e); } - } catch (Exception e) { - logger.error("Error parsing following JSON: "+line, e); - } } } - } finally { - br.close(); } } } catch (Throwable t) { - logger.error("Error deleting older archive file. archiveFile=" - + archiveFile, t); + logger.error("Error deleting older archive file. archiveFile={}", archiveFile, t); } - } void logError(String msg) { long currTimeMS = System.currentTimeMillis(); + if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { logger.error(msg); - lastErrorLogMS = currTimeMS; - } - } - class AuditIndexRecord { - String id; - String filePath; - int linePosition = 0; - SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress; - Date fileCreateTime; - Date writeCompleteTime; - Date doneCompleteTime; - Date lastSuccessTime; - Date lastFailedTime; - int failedAttemptCount = 0; - boolean lastAttempt = false; - - @Override - public String toString() { - return "AuditIndexRecord [id=" + id + ", filePath=" + filePath - + ", linePosition=" + linePosition + ", status=" + status - + ", fileCreateTime=" + fileCreateTime - + ", writeCompleteTime=" + writeCompleteTime - + ", doneCompleteTime=" + doneCompleteTime - + ", lastSuccessTime=" + lastSuccessTime - + ", lastFailedTime=" + lastFailedTime - + ", failedAttemptCount=" + failedAttemptCount - + ", lastAttempt=" + lastAttempt + "]"; + lastErrorLogMS = currTimeMS; } - } - /* - * (non-Javadoc) + /** + * This return the current file. 
If there are not current open output file, + * then it will return null * - * @see java.lang.Runnable#run() + * @return */ - @Override - public void run() { - try { - //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox - MDC.clear(); - runLogAudit(); - } catch (Throwable t) { - logger.error("Exited thread without abnormaly. queue=" - + consumerProvider.getName(), t); - } + private synchronized PrintWriter getOpenLogFileStream() { + return logWriter; } - public void runLogAudit() { - // boolean isResumed = false; - while (true) { - try { - if (isDestDown) { - logger.info("Destination is down. sleeping for " - + retryDestinationMS - + " milli seconds. indexQueue=" + indexQueue.size() - + ", queueName=" + FILE_CACHE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); - Thread.sleep(retryDestinationMS); - } - // Let's pause between each iteration - if (currentConsumerIndexRecord == null) { - currentConsumerIndexRecord = indexQueue.poll( - retryDestinationMS, TimeUnit.MILLISECONDS); - } else { - Thread.sleep(retryDestinationMS); - } + /** + * @return + * @throws Exception + */ + private synchronized PrintWriter getLogFileStream() throws Exception { + closeFileIfNeeded(); - if (isDrain) { - // Need to exit + // Either there are no open log file or the previous one has been rolled over + if (currentWriterIndexRecord == null) { + // Create a new file + Date currentTime = new Date(); + String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime()); + String newFileName = fileName; + File outLogFile; + int i = 0; + + while (true) { + outLogFile = new File(logFolder, newFileName); + + File archiveLogFile = new File(archiveFolder, newFileName); + + if (!outLogFile.exists() && !archiveLogFile.exists()) { break; } - if (currentConsumerIndexRecord == null) { - closeFileIfNeeded(); - continue; - } - boolean isRemoveIndex = false; - File consumerFile = new File( - currentConsumerIndexRecord.filePath); - if 
(!consumerFile.exists()) { - logger.error("Consumer file=" + consumerFile.getPath() - + " not found."); - printIndex(); - isRemoveIndex = true; - } else { - // Let's open the file to write - BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream( - currentConsumerIndexRecord.filePath),"UTF-8")); - try { - int startLine = currentConsumerIndexRecord.linePosition; - String line; - int currLine = 0; - List events = new ArrayList<>(); - while ((line = br.readLine()) != null) { - currLine++; - if (currLine < startLine) { - continue; - } - AuditEventBase event = MiscUtil.fromJson(line, AuthzAuditEvent.class); - events.add(event); + i++; - if (events.size() == auditBatchSize) { - boolean ret = sendEvent(events, - currentConsumerIndexRecord, currLine); - if (!ret) { - throw new Exception("Destination down"); - } - events.clear(); - } - } - if (events.size() > 0) { - boolean ret = sendEvent(events, - currentConsumerIndexRecord, currLine); - if (!ret) { - throw new Exception("Destination down"); - } - events.clear(); - } - logger.info("Done reading file. file=" - + currentConsumerIndexRecord.filePath - + ", queueName=" + FILE_CACHE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); - // The entire file is read - currentConsumerIndexRecord.status = SPOOL_FILE_STATUS.done; - currentConsumerIndexRecord.doneCompleteTime = new Date(); - currentConsumerIndexRecord.lastAttempt = true; + int lastDot = fileName.lastIndexOf('.'); + String baseName = fileName.substring(0, lastDot); + String extension = fileName.substring(lastDot); - isRemoveIndex = true; - } catch (Exception ex) { - isDestDown = true; - logError("Destination down. 
queueName=" - + FILE_CACHE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); - lastAttemptTime = System.currentTimeMillis(); - // Update the index file - currentConsumerIndexRecord.lastFailedTime = new Date(); - currentConsumerIndexRecord.failedAttemptCount++; - currentConsumerIndexRecord.lastAttempt = false; - saveIndexFile(); - } finally { - br.close(); - } - } - if (isRemoveIndex) { - // Remove this entry from index - removeIndexRecord(currentConsumerIndexRecord); - currentConsumerIndexRecord = null; - closeFileIfNeeded(); + newFileName = baseName + "." + i + extension; + } + + fileName = newFileName; + + logger.info("Creating new file. queueName={}, fileName={}", FILE_CACHE_PROVIDER_NAME, fileName); + + // Open the file + logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outLogFile), StandardCharsets.UTF_8))); + + AuditIndexRecord tmpIndexRecord = new AuditIndexRecord(); + + tmpIndexRecord.id = MiscUtil.generateUniqueId(); + tmpIndexRecord.filePath = outLogFile.getPath(); + tmpIndexRecord.status = SPOOL_FILE_STATUS.write_inprogress; + tmpIndexRecord.fileCreateTime = currentTime; + tmpIndexRecord.lastAttempt = true; + + currentWriterIndexRecord = tmpIndexRecord; + + indexRecords.add(currentWriterIndexRecord); + + saveIndexFile(); + } else { + if (logWriter == null) { + // This means the process just started. We need to open the file in append mode. + logger.info("Opening existing file for append. 
queueName={}, fileName={}", FILE_CACHE_PROVIDER_NAME, currentWriterIndexRecord.filePath); + + logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(currentWriterIndexRecord.filePath, true), StandardCharsets.UTF_8))); + } + } + + return logWriter; + } + + private synchronized void closeFileIfNeeded() throws IOException { + // Is there file open to write or there are no pending file, then close the active file + + if (currentWriterIndexRecord != null) { + // Check whether the file needs to rolled + rollOverSpoolFileByTime(); + + if (closeFile) { + // Roll the file + if (logWriter != null) { + logWriter.flush(); + logWriter.close(); + + logWriter = null; + closeFile = false; } - } catch (InterruptedException e) { - logger.info("Caught exception in consumer thread. Shutdown might be in progress"); - } catch (Throwable t) { - logger.error("Exception in destination writing thread.", t); + + currentWriterIndexRecord.status = SPOOL_FILE_STATUS.pending; + currentWriterIndexRecord.writeCompleteTime = new Date(); + + saveIndexFile(); + + logger.info("Adding file to queue. queueName={}, fileName={}", FILE_CACHE_PROVIDER_NAME, currentWriterIndexRecord.filePath); + + indexQueue.add(currentWriterIndexRecord); + + currentWriterIndexRecord = null; } } - logger.info("Exiting file spooler. provider=" + FILE_CACHE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); } - private boolean sendEvent(List events, AuditIndexRecord indexRecord, - int currLine) { + private void rollOverSpoolFileByTime() { + if (System.currentTimeMillis() - currentWriterIndexRecord.fileCreateTime.getTime() > fileRolloverSec * 1000L) { + closeFile = true; + + logger.info("Closing file. Rolling over. 
queueName={}, fileName={}", FILE_CACHE_PROVIDER_NAME, currentWriterIndexRecord.filePath); + } + } + + private boolean sendEvent(List events, AuditIndexRecord indexRecord, int currLine) { boolean ret = true; + try { ret = consumerProvider.log(events); + if (!ret) { // Need to log error after fixed interval - logError("Error sending logs to consumer. provider=" - + FILE_CACHE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); + logError("Error sending logs to consumer. provider=" + FILE_CACHE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName()); } else { // Update index and save - indexRecord.linePosition = currLine; - indexRecord.status = SPOOL_FILE_STATUS.read_inprogress; + indexRecord.linePosition = currLine; + indexRecord.status = SPOOL_FILE_STATUS.read_inprogress; indexRecord.lastSuccessTime = new Date(); - indexRecord.lastAttempt = true; + indexRecord.lastAttempt = true; + saveIndexFile(); if (isDestDown) { isDestDown = false; - logger.info("Destination up now. " + indexRecord.filePath - + ", queueName=" + FILE_CACHE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + + logger.info("Destination up now. {}, queueName={}, consumer={}", indexRecord.filePath, FILE_CACHE_PROVIDER_NAME, consumerProvider.getName()); } } } catch (Throwable t) { - logger.error("Error while sending logs to consumer. provider=" - + FILE_CACHE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName() + ", log=" + events, t); + logger.error("Error while sending logs to consumer. 
provider={}, consumer={}, log={}", FILE_CACHE_PROVIDER_NAME, consumerProvider.getName(), events, t); } return ret; } + public enum SPOOL_FILE_STATUS { + pending, write_inprogress, read_inprogress, done + } + + static class AuditIndexRecord { + String id; + String filePath; + int linePosition; + SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress; + Date fileCreateTime; + Date writeCompleteTime; + Date doneCompleteTime; + Date lastSuccessTime; + Date lastFailedTime; + int failedAttemptCount; + boolean lastAttempt; + + @Override + public String toString() { + return "AuditIndexRecord [id=" + id + ", filePath=" + filePath + + ", linePosition=" + linePosition + ", status=" + status + + ", fileCreateTime=" + fileCreateTime + + ", writeCompleteTime=" + writeCompleteTime + + ", doneCompleteTime=" + doneCompleteTime + + ", lastSuccessTime=" + lastSuccessTime + + ", lastFailedTime=" + lastFailedTime + + ", failedAttemptCount=" + failedAttemptCount + + ", lastAttempt=" + lastAttempt + "]"; + } + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueue.java index a4e0683665..a5d9c437e9 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueue.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueue.java @@ -30,61 +30,68 @@ /* AuditFileQueue class does the work of stashing the audit logs into Local Filesystem before sending it to the AuditBatchQueue Consumer */ - public class AuditFileQueue extends BaseAuditHandler { - private static final Logger logger = LoggerFactory.getLogger(AuditFileQueue.class); - - AuditFileQueueSpool fileSpooler = null; - AuditHandler consumer = null; + private static final Logger logger = LoggerFactory.getLogger(AuditFileQueue.class); static final String DEFAULT_NAME = "batch"; + AuditFileQueueSpool fileSpooler; + AuditHandler consumer; + public AuditFileQueue(AuditHandler 
consumer) { this.consumer = consumer; } public void init(Properties prop, String basePropertyName) { String propPrefix = "xasecure.audit.batch"; + if (basePropertyName != null) { propPrefix = basePropertyName; } + super.init(prop, propPrefix); //init AuditFileQueueSpooler thread to send Local logs to destination fileSpooler = new AuditFileQueueSpool(consumer); - fileSpooler.init(prop,propPrefix); + + fileSpooler.init(prop, propPrefix); } @Override public boolean log(AuditEventBase event) { boolean ret = false; - if ( event != null) { + + if (event != null) { fileSpooler.stashLogs(event); + if (fileSpooler.isSpoolingSuccessful()) { ret = true; } } + return ret; } @Override public boolean log(Collection events) { boolean ret = true; - if ( events != null) { + + if (events != null) { for (AuditEventBase event : events) { ret = log(event); } } + return ret; } - @Override public void start() { // Start the consumer thread if (consumer != null) { consumer.start(); } + if (fileSpooler != null) { // start AuditFileSpool thread fileSpooler.start(); @@ -93,7 +100,8 @@ public void start() { @Override public void stop() { - logger.info("Stop called. name=" + getName()); + logger.info("Stop called. name={}", getName()); + if (consumer != null) { consumer.stop(); } @@ -101,26 +109,28 @@ public void stop() { @Override public void waitToComplete() { - logger.info("waitToComplete called. name=" + getName()); - if ( consumer != null) { + logger.info("waitToComplete called. name={}", getName()); + + if (consumer != null) { consumer.waitToComplete(); } } @Override public void waitToComplete(long timeout) { - logger.info("waitToComplete called. name=" + getName()); - if ( consumer != null) { + logger.info("waitToComplete called. name={}", getName()); + + if (consumer != null) { consumer.waitToComplete(timeout); } } @Override public void flush() { - logger.info("waitToComplete. name=" + getName()); - if ( consumer != null) { + logger.info("waitToComplete. 
name={}", getName()); + + if (consumer != null) { consumer.flush(); } } - -} \ No newline at end of file +} diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueueSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueueSpool.java index f87ec55ab1..abe1aec926 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueueSpool.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueueSpool.java @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.ranger.audit.queue; @@ -32,15 +32,14 @@ import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; -import java.io.FileFilter; import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.Date; @@ -60,55 +59,50 @@ public class AuditFileQueueSpool implements Runnable { private static final Logger logger = LoggerFactory.getLogger(AuditFileQueueSpool.class); - public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir"; - public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format"; - public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir"; + public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir"; + public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format"; + public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir"; public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files"; - public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix"; - public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec"; - public static final String PROP_FILE_SPOOL_INDEX_FILE = 
"filespool.index.filename"; - public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms"; + public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix"; + public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec"; + public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename"; + public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms"; public static final String PROP_FILE_SPOOL_BATCH_SIZE = "filespool.buffer.size"; - public static final String FILE_QUEUE_PROVIDER_NAME = "AuditFileQueueSpool"; + public static final String FILE_QUEUE_PROVIDER_NAME = "AuditFileQueueSpool"; public static final String DEFAULT_AUDIT_FILE_TYPE = "json"; - AuditHandler consumerProvider = null; - BlockingQueue indexQueue = new LinkedBlockingQueue(); - List indexRecords = new ArrayList(); + AuditHandler consumerProvider; + BlockingQueue indexQueue = new LinkedBlockingQueue<>(); + List indexRecords = new ArrayList<>(); // Folder and File attributes - File logFolder = null; - String logFileNameFormat = null; - File archiveFolder = null; - String fileNamePrefix = null; - String indexFileName = null; - File indexFile = null; - String indexDoneFileName = null; - String auditFileType = null; - File indexDoneFile = null; - int retryDestinationMS = 30 * 1000; // Default 30 seconds - int fileRolloverSec = 24 * 60 * 60; // In seconds - int maxArchiveFiles = 100; - int errorLogIntervalMS = 30 * 1000; // Every 30 seconds - long lastErrorLogMS = 0; - boolean isAuditFileCacheProviderEnabled = false; - boolean closeFile = false; - boolean isPending = false; - long lastAttemptTime = 0; - long bufferSize = 1000; - boolean initDone = false; - - PrintWriter logWriter = null; - AuditIndexRecord currentWriterIndexRecord = null; - AuditIndexRecord currentConsumerIndexRecord = null; - - BufferedReader logReader = null; - Thread destinationThread = null; - - boolean 
isWriting = true; - boolean isDrain = false; - boolean isDestDown = false; - boolean isSpoolingSuccessful = true; + File logFolder; + String logFileNameFormat; + File archiveFolder; + String fileNamePrefix; + String indexFileName; + File indexFile; + String indexDoneFileName; + String auditFileType; + File indexDoneFile; + long bufferSize = 1000; + int retryDestinationMS = 30 * 1000; // Default 30 seconds + int fileRolloverSec = 24 * 60 * 60; // In seconds + int maxArchiveFiles = 100; + int errorLogIntervalMS = 30 * 1000; // Every 30 seconds + long lastErrorLogMS; + boolean closeFile; + boolean isPending; + long lastAttemptTime; + boolean initDone; + PrintWriter logWriter; + AuditIndexRecord currentWriterIndexRecord; + AuditIndexRecord currentConsumerIndexRecord; + Thread destinationThread; + boolean isDrain; + boolean isDestDown; + boolean isWriting = true; + boolean isSpoolingSuccessful = true; public AuditFileQueueSpool(AuditHandler consumerProvider) { this.consumerProvider = consumerProvider; @@ -122,155 +116,148 @@ public boolean init(Properties props, String basePropertyName) { logger.debug("==> AuditFileQueueSpool.init()"); if (initDone) { - logger.error("init() called more than once. queueProvider=" - + "" + ", consumerProvider=" - + consumerProvider.getName()); + logger.error("init() called more than once. queueProvider=, consumerProvider={}", consumerProvider.getName()); + return true; } + String propPrefix = "xasecure.audit.filespool"; + if (basePropertyName != null) { propPrefix = basePropertyName; } try { // Initial folder and file properties - String logFolderProp = MiscUtil.getStringProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_LOCAL_DIR); - logFileNameFormat = MiscUtil.getStringProperty(props, - basePropertyName + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME); - String archiveFolderProp = MiscUtil.getStringProperty(props, - propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR); - fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "." 
- + PROP_FILE_SPOOL_FILENAME_PREFIX); - indexFileName = MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_INDEX_FILE); - retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS); - fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec); - maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles); - logger.info("retryDestinationMS=" + retryDestinationMS - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME); - logger.info("fileRolloverSec=" + fileRolloverSec + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); - logger.info("maxArchiveFiles=" + maxArchiveFiles + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); + String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_LOCAL_DIR); + String archiveFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR); + + logFileNameFormat = MiscUtil.getStringProperty(props, basePropertyName + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME); + fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_FILENAME_PREFIX); + indexFileName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_INDEX_FILE); + retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS); + fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec); + maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "." 
+ PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles); + + logger.info("retryDestinationMS={}, queueName={}", retryDestinationMS, FILE_QUEUE_PROVIDER_NAME); + logger.info("fileRolloverSec={}, queueName={}", fileRolloverSec, FILE_QUEUE_PROVIDER_NAME); + logger.info("maxArchiveFiles={}, queueName={}", maxArchiveFiles, FILE_QUEUE_PROVIDER_NAME); if (logFolderProp == null || logFolderProp.isEmpty()) { - logger.error("Audit spool folder is not configured. Please set " - + propPrefix - + "." - + PROP_FILE_SPOOL_LOCAL_DIR - + ". queueName=" + FILE_QUEUE_PROVIDER_NAME); + logger.error("Audit spool folder is not configured. Please set {}.{}. queueName={}", propPrefix, PROP_FILE_SPOOL_LOCAL_DIR, FILE_QUEUE_PROVIDER_NAME); + return false; } + logFolder = new File(logFolderProp); + if (!logFolder.isDirectory()) { boolean result = logFolder.mkdirs(); + if (!logFolder.isDirectory() || !result) { - logger.error("File Spool folder not found and can't be created. folder=" - + logFolder.getAbsolutePath() - + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); + logger.error("File Spool folder not found and can't be created. 
folder={}, queueName={}", logFolder.getAbsolutePath(), FILE_QUEUE_PROVIDER_NAME); + return false; } } - logger.info("logFolder=" + logFolder + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); + + logger.info("logFolder={}, queueName={}", logFolder, FILE_QUEUE_PROVIDER_NAME); if (logFileNameFormat == null || logFileNameFormat.isEmpty()) { - logFileNameFormat = "spool_" + "%app-type%" + "_" - + "%time:yyyyMMdd-HHmm.ss%.log"; + logFileNameFormat = "spool_" + "%app-type%" + "_" + "%time:yyyyMMdd-HHmm.ss%.log"; } - logger.info("logFileNameFormat=" + logFileNameFormat - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME); + + logger.info("logFileNameFormat={}, queueName={}", logFileNameFormat, FILE_QUEUE_PROVIDER_NAME); if (archiveFolderProp == null || archiveFolderProp.isEmpty()) { archiveFolder = new File(logFolder, "archive"); } else { archiveFolder = new File(archiveFolderProp); } + if (!archiveFolder.isDirectory()) { boolean result = archiveFolder.mkdirs(); + if (!archiveFolder.isDirectory() || !result) { - logger.error("File Spool archive folder not found and can't be created. folder=" - + archiveFolder.getAbsolutePath() - + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); + logger.error("File Spool archive folder not found and can't be created. 
folder={}, queueName={}", archiveFolder.getAbsolutePath(), FILE_QUEUE_PROVIDER_NAME); + return false; } } - logger.info("archiveFolder=" + archiveFolder + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); + + logger.info("archiveFolder={}, queueName={}", archiveFolder, FILE_QUEUE_PROVIDER_NAME); if (indexFileName == null || indexFileName.isEmpty()) { if (fileNamePrefix == null || fileNamePrefix.isEmpty()) { - fileNamePrefix = FILE_QUEUE_PROVIDER_NAME + "_" - + consumerProvider.getName(); + fileNamePrefix = FILE_QUEUE_PROVIDER_NAME + "_" + consumerProvider.getName(); } - indexFileName = "index_" + fileNamePrefix + "_" + "%app-type%" - + ".json"; - indexFileName = MiscUtil.replaceTokens(indexFileName, - System.currentTimeMillis()); + + indexFileName = "index_" + fileNamePrefix + "_" + "%app-type%" + ".json"; + indexFileName = MiscUtil.replaceTokens(indexFileName, System.currentTimeMillis()); } indexFile = new File(logFolder, indexFileName); + if (!indexFile.exists()) { boolean ret = indexFile.createNewFile(); + if (!ret) { - logger.error("Error creating index file. fileName=" - + indexFile.getPath()); + logger.error("Error creating index file. fileName={}", indexFile.getPath()); + return false; } } - logger.info("indexFile=" + indexFile + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); + + logger.info("indexFile={}, queueName={}", indexFile, FILE_QUEUE_PROVIDER_NAME); int lastDot = indexFileName.lastIndexOf('.'); + if (lastDot < 0) { lastDot = indexFileName.length() - 1; } - indexDoneFileName = indexFileName.substring(0, lastDot) - + "_closed.json"; - indexDoneFile = new File(logFolder, indexDoneFileName); + + indexDoneFileName = indexFileName.substring(0, lastDot) + "_closed.json"; + indexDoneFile = new File(logFolder, indexDoneFileName); + if (!indexDoneFile.exists()) { boolean ret = indexDoneFile.createNewFile(); + if (!ret) { - logger.error("Error creating index done file. fileName=" - + indexDoneFile.getPath()); + logger.error("Error creating index done file. 
fileName={}", indexDoneFile.getPath()); + return false; } } - logger.info("indexDoneFile=" + indexDoneFile + ", queueName=" - + FILE_QUEUE_PROVIDER_NAME); + + logger.info("indexDoneFile={}, queueName={}", indexDoneFile, FILE_QUEUE_PROVIDER_NAME); // Load index file loadIndexFile(); + for (AuditIndexRecord auditIndexRecord : indexRecords) { if (!auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.done)) { isPending = true; } - if (auditIndexRecord.getStatus() - .equals(SPOOL_FILE_STATUS.write_inprogress)) { + + if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.write_inprogress)) { currentWriterIndexRecord = auditIndexRecord; - logger.info("currentWriterIndexRecord=" - + currentWriterIndexRecord.getFilePath() - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME); + + logger.info("currentWriterIndexRecord={}, queueName={}", currentWriterIndexRecord.getFilePath(), FILE_QUEUE_PROVIDER_NAME); } - if (auditIndexRecord.getStatus() - .equals(SPOOL_FILE_STATUS.read_inprogress)) { + + if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.read_inprogress)) { indexQueue.add(auditIndexRecord); } } + printIndex(); - for (int i = 0; i < indexRecords.size(); i++) { - AuditIndexRecord auditIndexRecord = indexRecords.get(i); + + for (AuditIndexRecord auditIndexRecord : indexRecords) { if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.pending)) { File consumerFile = new File(auditIndexRecord.getFilePath()); + if (!consumerFile.exists()) { - logger.error("INIT: Consumer file=" - + consumerFile.getPath() + " not found."); + logger.error("INIT: Consumer file={} not found.", consumerFile.getPath()); } else { indexQueue.add(auditIndexRecord); } @@ -278,22 +265,22 @@ public boolean init(Properties props, String basePropertyName) { } auditFileType = MiscUtil.getStringProperty(props, propPrefix + ".filetype", DEFAULT_AUDIT_FILE_TYPE); + if (auditFileType == null) { auditFileType = DEFAULT_AUDIT_FILE_TYPE; } - } catch (Throwable t) { - logger.error("Error initializing File 
Spooler. queue=" - + FILE_QUEUE_PROVIDER_NAME, t); + logger.error("Error initializing File Spooler. queue={}", FILE_QUEUE_PROVIDER_NAME, t); + return false; } - bufferSize = MiscUtil.getLongProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_BATCH_SIZE, bufferSize); + bufferSize = MiscUtil.getLongProperty(props, propPrefix + "." + PROP_FILE_SPOOL_BATCH_SIZE, bufferSize); initDone = true; logger.debug("<== AuditFileQueueSpool.init()"); + return true; } @@ -302,38 +289,37 @@ public boolean init(Properties props, String basePropertyName) { */ public void start() { if (!initDone) { - logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName=" - + FILE_QUEUE_PROVIDER_NAME); + logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName={}", FILE_QUEUE_PROVIDER_NAME); + return; } - logger.info("Starting writerThread, queueName=" - + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); + logger.info("Starting writerThread, queueName={}, consumer={}", FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); // Let's start the thread to read - destinationThread = new Thread(this, FILE_QUEUE_PROVIDER_NAME + "_" - + consumerProvider.getName() + "_destWriter"); + destinationThread = new Thread(this, FILE_QUEUE_PROVIDER_NAME + "_" + consumerProvider.getName() + "_destWriter"); + destinationThread.setDaemon(true); destinationThread.start(); } public void stop() { if (!initDone) { - logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName=" - + FILE_QUEUE_PROVIDER_NAME); + logger.error("Cannot stop Audit File Spooler. Initilization not done. 
queueName={}", FILE_QUEUE_PROVIDER_NAME); + return; } - logger.info("Stop called, queueName=" + FILE_QUEUE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + + logger.info("Stop called, queueName={}, consumer={}", FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); isDrain = true; + flush(); PrintWriter out = getOpenLogFileStream(); + if (out != null) { - // If write is still going on, then let's give it enough time to - // complete + // If write is still going on, then let's give it enough time to complete for (int i = 0; i < 3; i++) { if (isWriting) { try { @@ -341,25 +327,28 @@ public void stop() { } catch (InterruptedException e) { // ignore } + continue; } + try { - logger.info("Closing open file, queueName=" - + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); + logger.info("Closing open file, queueName={}, consumer={}", FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); out.flush(); out.close(); + break; } catch (Throwable t) { logger.debug("Error closing spool out file.", t); } } } + try { if (destinationThread != null) { destinationThread.interrupt(); } + destinationThread = null; } catch (Throwable e) { // ignore @@ -368,11 +357,13 @@ public void stop() { public void flush() { if (!initDone) { - logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName=" - + FILE_QUEUE_PROVIDER_NAME); + logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName={}", FILE_QUEUE_PROVIDER_NAME); + return; } + PrintWriter out = getOpenLogFileStream(); + if (out != null) { out.flush(); } @@ -386,8 +377,8 @@ public void flush() { */ public boolean isPending() { if (!initDone) { - logError("isPending(): File Spooler not initialized. queueName=" - + FILE_QUEUE_PROVIDER_NAME); + logError("isPending(): File Spooler not initialized. 
queueName=" + FILE_QUEUE_PROVIDER_NAME); + return false; } @@ -403,179 +394,187 @@ public long getLastAttemptTimeDelta() { if (lastAttemptTime == 0) { return 0; } + return System.currentTimeMillis() - lastAttemptTime; } - synchronized public void stashLogs(AuditEventBase event) { - + public synchronized void stashLogs(AuditEventBase event) { if (isDrain) { // Stop has been called, so this method shouldn't be called - logger.error("stashLogs() is called after stop is called. event=" - + event); + logger.error("stashLogs() is called after stop is called. event={}", event); + return; } + try { isWriting = true; - PrintWriter logOut = getLogFileStream(); - // Convert event to json - String jsonStr = MiscUtil.stringify(event); + + PrintWriter logOut = getLogFileStream(); + String jsonStr = MiscUtil.stringify(event); + logOut.println(jsonStr); logOut.flush(); - isPending = true; + + isPending = true; isSpoolingSuccessful = true; - } catch (Throwable t) { + } catch (Throwable t) { isSpoolingSuccessful = false; - logger.error("Error writing to file. event=" + event, t); + + logger.error("Error writing to file. event={}", event, t); } finally { isWriting = false; } - } - synchronized public void stashLogs(Collection events) { + public synchronized void stashLogs(Collection events) { for (AuditEventBase event : events) { stashLogs(event); } + flush(); } - synchronized public void stashLogsString(String event) { + public synchronized void stashLogsString(String event) { if (isDrain) { // Stop has been called, so this method shouldn't be called - logger.error("stashLogs() is called after stop is called. event=" - + event); + logger.error("stashLogs() is called after stop is called. event={}", event); + return; } + try { isWriting = true; + PrintWriter logOut = getLogFileStream(); + logOut.println(event); } catch (Exception ex) { - logger.error("Error writing to file. event=" + event, ex); + logger.error("Error writing to file. 
event={}", event, ex); } finally { isWriting = false; } - } - synchronized public boolean isSpoolingSuccessful() { + public synchronized boolean isSpoolingSuccessful() { return isSpoolingSuccessful; } - synchronized public void stashLogsString(Collection events) { + public synchronized void stashLogsString(Collection events) { for (String event : events) { stashLogsString(event); } + flush(); } - /** - * This return the current file. If there are not current open output file, - * then it will return null + /* + * (non-Javadoc) * - * @return - * @throws Exception + * @see java.lang.Runnable#run() */ - synchronized private PrintWriter getOpenLogFileStream() { - return logWriter; + @Override + public void run() { + try { + //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox + MDC.clear(); + runLogAudit(); + } catch (Throwable t) { + logger.error("Exited thread without abnormaly. queue={}", consumerProvider.getName(), t); + } } - /** - * @return - * @throws Exception - */ - synchronized private PrintWriter getLogFileStream() throws Exception { - closeFileIfNeeded(); - // Either there are no open log file or the previous one has been rolled - // over - if (currentWriterIndexRecord == null) { - Date currentTime = new Date(); - // Create a new file - String fileName = MiscUtil.replaceTokens(logFileNameFormat, - currentTime.getTime()); - String newFileName = fileName; - File outLogFile = null; - int i = 0; - while (true) { - outLogFile = new File(logFolder, newFileName); - File archiveLogFile = new File(archiveFolder, newFileName); - if (!outLogFile.exists() && !archiveLogFile.exists()) { + public void runLogAudit() { + // boolean isResumed = false; + while (true) { + try { + if (isDestDown) { + logger.info("Destination is down. sleeping for {} milli seconds. 
indexQueue={}, queueName={}, consumer={}", retryDestinationMS, indexQueue.size(), FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); + + Thread.sleep(retryDestinationMS); + } + + // Let's pause between each iteration + if (currentConsumerIndexRecord == null) { + currentConsumerIndexRecord = indexQueue.poll(retryDestinationMS, TimeUnit.MILLISECONDS); + } else { + Thread.sleep(retryDestinationMS); + } + + if (isDrain) { + // Need to exit break; } - i++; - int lastDot = fileName.lastIndexOf('.'); - String baseName = fileName.substring(0, lastDot); - String extension = fileName.substring(lastDot); - newFileName = baseName + "." + i + extension; - } - fileName = newFileName; - logger.info("Creating new file. queueName=" - + FILE_QUEUE_PROVIDER_NAME + ", fileName=" + fileName); - // Open the file - logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream( - outLogFile),"UTF-8"))); - AuditIndexRecord tmpIndexRecord = new AuditIndexRecord(); + if (currentConsumerIndexRecord == null) { + closeFileIfNeeded(); - tmpIndexRecord.setId(MiscUtil.generateUniqueId()); - tmpIndexRecord.setFilePath(outLogFile.getPath()); - tmpIndexRecord.setStatus(SPOOL_FILE_STATUS.write_inprogress); - tmpIndexRecord.setFileCreateTime(currentTime); - tmpIndexRecord.setLastAttempt(true); - currentWriterIndexRecord = tmpIndexRecord; - indexRecords.add(currentWriterIndexRecord); - saveIndexFile(); + continue; + } - } else { - if (logWriter == null) { - // This means the process just started. We need to open the file - // in append mode. - logger.info("Opening existing file for append. 
queueName=" - + FILE_QUEUE_PROVIDER_NAME + ", fileName=" - + currentWriterIndexRecord.getFilePath()); - logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream( - currentWriterIndexRecord.getFilePath(), true),"UTF-8"))); - } - } - return logWriter; - } + boolean isRemoveIndex = false; + File consumerFile = new File(currentConsumerIndexRecord.getFilePath()); - synchronized private void closeFileIfNeeded() throws FileNotFoundException, - IOException { - // Is there file open to write or there are no pending file, then close - // the active file - if (currentWriterIndexRecord != null) { - // Check whether the file needs to rolled - rollOverSpoolFileByTime(); + if (!consumerFile.exists()) { + logger.error("Consumer file={} not found.", consumerFile.getPath()); - if (closeFile) { - // Roll the file - if (logWriter != null) { - logWriter.flush(); - logWriter.close(); - logWriter = null; - closeFile = false; + printIndex(); + + isRemoveIndex = true; + } else { + // Let's open the file to write + try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(currentConsumerIndexRecord.getFilePath()), StandardCharsets.UTF_8))) { + if (auditFileType.equalsIgnoreCase(DEFAULT_AUDIT_FILE_TYPE)) { + // if Audit File format is JSON each audit file in the Local Spool Location will be copied + // to HDFS location as JSON + File srcFile = new File(currentConsumerIndexRecord.getFilePath()); + + logFile(srcFile); + } else { + // If Audit File format is ORC, each records in audit files in the Local Spool Location will be + // read and converted into ORC format and pushed into an ORC file. + logEvent(br); + } + + logger.info("Done reading file. 
file={}, queueName={}, consumer={}", currentConsumerIndexRecord.getFilePath(), FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); + + // The entire file is read + currentConsumerIndexRecord.setStatus(SPOOL_FILE_STATUS.done); + currentConsumerIndexRecord.setDoneCompleteTime(new Date()); + currentConsumerIndexRecord.setLastAttempt(true); + + isRemoveIndex = true; + } catch (Exception ex) { + isDestDown = true; + + logError("Destination down. queueName=" + FILE_QUEUE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName()); + + lastAttemptTime = System.currentTimeMillis(); + + // Update the index file + currentConsumerIndexRecord.setLastFailedTime(new Date()); + currentConsumerIndexRecord.setFailedAttemptCount(currentConsumerIndexRecord.getFailedAttemptCount() + 1); + currentConsumerIndexRecord.setLastAttempt(false); + + saveIndexFile(); + } } - currentWriterIndexRecord.setStatus(SPOOL_FILE_STATUS.pending); - currentWriterIndexRecord.setWriteCompleteTime(new Date()); - saveIndexFile(); - logger.info("Adding file to queue. queueName=" - + FILE_QUEUE_PROVIDER_NAME + ", fileName=" - + currentWriterIndexRecord.getFilePath()); - indexQueue.add(currentWriterIndexRecord); - currentWriterIndexRecord = null; + + if (isRemoveIndex) { + // Remove this entry from index + removeIndexRecord(currentConsumerIndexRecord); + + currentConsumerIndexRecord = null; + + closeFileIfNeeded(); + } + } catch (InterruptedException e) { + logger.info("Caught exception in consumer thread. Shutdown might be in progress"); + } catch (Throwable t) { + logger.error("Exception in destination writing thread.", t); } } - } - private void rollOverSpoolFileByTime() { - if (System.currentTimeMillis() - - currentWriterIndexRecord.getFileCreateTime().getTime() > fileRolloverSec * 1000) { - closeFile = true; - logger.info("Closing file. Rolling over. queueName=" - + FILE_QUEUE_PROVIDER_NAME + ", fileName=" - + currentWriterIndexRecord.getFilePath()); - } + logger.info("Exiting file spooler. 
provider={}, consumer={}", FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); } /** @@ -584,64 +583,56 @@ private void rollOverSpoolFileByTime() { * @throws IOException */ void loadIndexFile() throws IOException { - logger.info("Loading index file. fileName=" + indexFile.getPath()); - BufferedReader br = null; - try { - br = new BufferedReader(new InputStreamReader(new FileInputStream(indexFile), "UTF-8")); + logger.info("Loading index file. fileName={}", indexFile.getPath()); + + try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(indexFile), StandardCharsets.UTF_8))) { indexRecords.clear(); - String line; - while ((line = br.readLine()) != null) { + + for (String line = br.readLine(); line != null; line = br.readLine()) { if (!line.isEmpty() && !line.startsWith("#")) { - try { - AuditIndexRecord record = MiscUtil.fromJson(line, - AuditIndexRecord.class); - indexRecords.add(record); - } catch (Exception e) { - logger.error("Error parsing following JSON: "+line, e); - } + try { + AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class); + + indexRecords.add(record); + } catch (Exception e) { + logger.error("Error parsing following JSON: {}", line, e); + } } } - } finally { - if (br!= null) { - br.close(); - } } } synchronized void printIndex() { logger.info("INDEX printIndex() ==== START"); - Iterator iter = indexRecords.iterator(); - while (iter.hasNext()) { - AuditIndexRecord record = iter.next(); - logger.info("INDEX=" + record + ", isFileExist=" - + (new File(record.getFilePath()).exists())); + + for (AuditIndexRecord record : indexRecords) { + logger.info("INDEX={}, isFileExist={}", record, (new File(record.getFilePath()).exists())); } + logger.info("INDEX printIndex() ==== END"); } - synchronized void removeIndexRecord(AuditIndexRecord indexRecord) - throws FileNotFoundException, IOException { - Iterator iter = indexRecords.iterator(); - while (iter.hasNext()) { + synchronized void 
removeIndexRecord(AuditIndexRecord indexRecord) throws IOException { + for (Iterator iter = indexRecords.iterator(); iter.hasNext(); ) { AuditIndexRecord record = iter.next(); + if (record.getId().equals(indexRecord.getId())) { - logger.info("Removing file from index. file=" + record.getFilePath() - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + logger.info("Removing file from index. file={}, queueName={}, consumer={}", record.getFilePath(), FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); iter.remove(); appendToDoneFile(record); } } + saveIndexFile(); - // If there are no more files in the index, then let's assume the - // destination is now available - if (indexRecords.size() == 0) { + + // If there are no more files in the index, then let's assume the destination is now available + if (indexRecords.isEmpty()) { isPending = false; } } - synchronized void saveIndexFile() throws FileNotFoundException, IOException { + synchronized void saveIndexFile() throws IOException { try (PrintWriter out = new PrintWriter(indexFile, "UTF-8")) { for (AuditIndexRecord auditIndexRecord : indexRecords) { out.println(MiscUtil.stringify(auditIndexRecord)); @@ -649,339 +640,336 @@ synchronized void saveIndexFile() throws FileNotFoundException, IOException { } } - void appendToDoneFile(AuditIndexRecord indexRecord) - throws FileNotFoundException, IOException { - logger.info("Moving to done file. " + indexRecord.getFilePath() - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); - String line = MiscUtil.stringify(indexRecord); - PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream( - indexDoneFile, true),"UTF-8"))); - out.println(line); - out.flush(); - out.close(); + void appendToDoneFile(AuditIndexRecord indexRecord) throws IOException { + logger.info("Moving to done file. 
{}, queueName={}, consumer={}", indexRecord.getFilePath(), FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); + + try (PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(indexDoneFile, true), StandardCharsets.UTF_8)))) { + String line = MiscUtil.stringify(indexRecord); + + out.println(line); + out.flush(); + } // After Each file is read and audit events are pushed into pipe, we flush to reach the destination immediate. consumerProvider.flush(); // Move to archive folder - File logFile = null; + File logFile = null; File archiveFile = null; + try { - logFile = new File(indexRecord.getFilePath()); - String fileName = logFile.getName(); - archiveFile = new File(archiveFolder, fileName); - logger.info("Moving logFile " + logFile + " to " + archiveFile); + logFile = new File(indexRecord.getFilePath()); + archiveFile = new File(archiveFolder, logFile.getName()); + + logger.info("Moving logFile {} to {}", logFile, archiveFile); + boolean result = logFile.renameTo(archiveFile); + if (!result) { - logger.error("Error moving log file to archive folder. Unable to rename" - + logFile + " to archiveFile=" + archiveFile); + logger.error("Error moving log file to archive folder. Unable to rename={} to archiveFile={}", logFile, archiveFile); } } catch (Throwable t) { - logger.error("Error moving log file to archive folder. logFile=" - + logFile + ", archiveFile=" + archiveFile, t); + logger.error("Error moving log file to archive folder. 
logFile={}, archiveFile={}", logFile, archiveFile, t); } // After archiving the file flush the pipe consumerProvider.flush(); archiveFile = null; + try { // Remove old files - File[] logFiles = archiveFolder.listFiles(new FileFilter() { - public boolean accept(File pathname) { - return pathname.getName().toLowerCase().endsWith(".log"); - } - }); + File[] logFiles = archiveFolder.listFiles(pathname -> pathname.getName().toLowerCase().endsWith(".log")); if (logFiles != null && logFiles.length > maxArchiveFiles) { int filesToDelete = logFiles.length - maxArchiveFiles; - BufferedReader br = new BufferedReader(new FileReader( - indexDoneFile)); - try { + + try (BufferedReader br = new BufferedReader(new FileReader(indexDoneFile))) { int filesDeletedCount = 0; - while ((line = br.readLine()) != null) { + + for (String line = br.readLine(); line != null; line = br.readLine()) { if (!line.isEmpty() && !line.startsWith("#")) { - try { - AuditIndexRecord record = MiscUtil.fromJson(line, - AuditIndexRecord.class); - logFile = new File(record.getFilePath()); - String fileName = logFile.getName(); - archiveFile = new File(archiveFolder, fileName); - if (archiveFile.exists()) { - logger.info("Deleting archive file " - + archiveFile); - boolean ret = archiveFile.delete(); - if (!ret) { - logger.error("Error deleting archive file. archiveFile=" - + archiveFile); - } - filesDeletedCount++; - if (filesDeletedCount >= filesToDelete) { - logger.info("Deleted " + filesDeletedCount - + " files"); - break; + try { + AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class); + + logFile = new File(record.getFilePath()); + archiveFile = new File(archiveFolder, logFile.getName()); + + if (archiveFile.exists()) { + logger.info("Deleting archive file {}", archiveFile); + + boolean ret = archiveFile.delete(); + + if (!ret) { + logger.error("Error deleting archive file. 
archiveFile={}", archiveFile); + } + + filesDeletedCount++; + + if (filesDeletedCount >= filesToDelete) { + logger.info("Deleted {} files", filesDeletedCount); + + break; + } } + } catch (Exception e) { + logger.error("Error parsing following JSON: {}", line, e); } - } catch (Exception e) { - logger.error("Error parsing following JSON: "+line, e); - } } } - } finally { - br.close(); } } } catch (Throwable t) { - logger.error("Error deleting older archive file. archiveFile=" - + archiveFile, t); + logger.error("Error deleting older archive file. archiveFile={}", archiveFile, t); } - } void logError(String msg) { long currTimeMS = System.currentTimeMillis(); + if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { logger.error(msg); + lastErrorLogMS = currTimeMS; } } - /* - * (non-Javadoc) + /** + * This return the current file. If there are not current open output file, + * then it will return null * - * @see java.lang.Runnable#run() + * @return */ - @Override - public void run() { - try { - //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox - MDC.clear(); - runLogAudit(); - } catch (Throwable t) { - logger.error("Exited thread without abnormaly. queue=" - + consumerProvider.getName(), t); - } + private synchronized PrintWriter getOpenLogFileStream() { + return logWriter; } - public void runLogAudit() { - // boolean isResumed = false; - while (true) { - try { - if (isDestDown) { - logger.info("Destination is down. sleeping for " - + retryDestinationMS - + " milli seconds. 
indexQueue=" + indexQueue.size() - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); - Thread.sleep(retryDestinationMS); - } - // Let's pause between each iteration - if (currentConsumerIndexRecord == null) { - currentConsumerIndexRecord = indexQueue.poll( - retryDestinationMS, TimeUnit.MILLISECONDS); - } else { - Thread.sleep(retryDestinationMS); - } + /** + * @return + * @throws Exception + */ + private synchronized PrintWriter getLogFileStream() throws Exception { + closeFileIfNeeded(); - if (isDrain) { - // Need to exit + // Either there are no open log file or the previous one has been rolled over + if (currentWriterIndexRecord == null) { + Date currentTime = new Date(); + + // Create a new file + String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime()); + String newFileName = fileName; + File outLogFile; + int i = 0; + + while (true) { + outLogFile = new File(logFolder, newFileName); + + File archiveLogFile = new File(archiveFolder, newFileName); + + if (!outLogFile.exists() && !archiveLogFile.exists()) { break; } - if (currentConsumerIndexRecord == null) { - closeFileIfNeeded(); - continue; - } - boolean isRemoveIndex = false; - File consumerFile = new File( - currentConsumerIndexRecord.getFilePath()); - if (!consumerFile.exists()) { - logger.error("Consumer file=" + consumerFile.getPath() - + " not found."); - printIndex(); - isRemoveIndex = true; - } else { - // Let's open the file to write - BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream( - currentConsumerIndexRecord.getFilePath()),"UTF-8")); - try { - if (auditFileType.equalsIgnoreCase(DEFAULT_AUDIT_FILE_TYPE)) { - // if Audit File format is JSON each audit file in the Local Spool Location will be copied - // to HDFS location as JSON - File srcFile = new File(currentConsumerIndexRecord.getFilePath()); - logFile(srcFile); - } else { - // If Audit File format is ORC, each records in audit files in the 
Local Spool Location will be - // read and converted into ORC format and pushed into an ORC file. - logEvent(br); - } - logger.info("Done reading file. file=" - + currentConsumerIndexRecord.getFilePath() - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); - // The entire file is read - currentConsumerIndexRecord.setStatus(SPOOL_FILE_STATUS.done); - currentConsumerIndexRecord.setDoneCompleteTime(new Date()); - currentConsumerIndexRecord.setLastAttempt(true); + i++; - isRemoveIndex = true; - } catch (Exception ex) { - isDestDown = true; - logError("Destination down. queueName=" - + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); - lastAttemptTime = System.currentTimeMillis(); - // Update the index file - currentConsumerIndexRecord.setLastFailedTime(new Date()); - currentConsumerIndexRecord.setFailedAttemptCount(currentConsumerIndexRecord.getFailedAttemptCount() + 1); - currentConsumerIndexRecord.setLastAttempt(false); - saveIndexFile(); - } finally { - br.close(); - } - } - if (isRemoveIndex) { - // Remove this entry from index - removeIndexRecord(currentConsumerIndexRecord); - currentConsumerIndexRecord = null; - closeFileIfNeeded(); + int lastDot = fileName.lastIndexOf('.'); + String baseName = fileName.substring(0, lastDot); + String extension = fileName.substring(lastDot); + + newFileName = baseName + "." + i + extension; + } + + fileName = newFileName; + + logger.info("Creating new file. 
queueName={}, fileName={}", FILE_QUEUE_PROVIDER_NAME, fileName); + + // Open the file + logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outLogFile), StandardCharsets.UTF_8))); + + AuditIndexRecord tmpIndexRecord = new AuditIndexRecord(); + + tmpIndexRecord.setId(MiscUtil.generateUniqueId()); + tmpIndexRecord.setFilePath(outLogFile.getPath()); + tmpIndexRecord.setStatus(SPOOL_FILE_STATUS.write_inprogress); + tmpIndexRecord.setFileCreateTime(currentTime); + tmpIndexRecord.setLastAttempt(true); + + currentWriterIndexRecord = tmpIndexRecord; + + indexRecords.add(currentWriterIndexRecord); + + saveIndexFile(); + } else { + if (logWriter == null) { + // This means the process just started. We need to open the file in append mode. + logger.info("Opening existing file for append. queueName={}, fileName={}", FILE_QUEUE_PROVIDER_NAME, currentWriterIndexRecord.getFilePath()); + + logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(currentWriterIndexRecord.getFilePath(), true), StandardCharsets.UTF_8))); + } + } + + return logWriter; + } + + private synchronized void closeFileIfNeeded() throws IOException { + // Is there file open to write or there are no pending file, then close + // the active file + if (currentWriterIndexRecord != null) { + // Check whether the file needs to rolled + rollOverSpoolFileByTime(); + + if (closeFile) { + // Roll the file + if (logWriter != null) { + logWriter.flush(); + logWriter.close(); + + logWriter = null; + closeFile = false; } - } catch (InterruptedException e) { - logger.info("Caught exception in consumer thread. Shutdown might be in progress"); - } catch (Throwable t) { - logger.error("Exception in destination writing thread.", t); + + currentWriterIndexRecord.setStatus(SPOOL_FILE_STATUS.pending); + currentWriterIndexRecord.setWriteCompleteTime(new Date()); + + saveIndexFile(); + + logger.info("Adding file to queue. 
queueName={}, fileName={}", FILE_QUEUE_PROVIDER_NAME, currentWriterIndexRecord.getFilePath()); + + indexQueue.add(currentWriterIndexRecord); + + currentWriterIndexRecord = null; } } - logger.info("Exiting file spooler. provider=" + FILE_QUEUE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + } + + private void rollOverSpoolFileByTime() { + if (System.currentTimeMillis() - currentWriterIndexRecord.getFileCreateTime().getTime() > (fileRolloverSec * 1000L)) { + closeFile = true; + + logger.info("Closing file. Rolling over. queueName={}, fileName={}", FILE_QUEUE_PROVIDER_NAME, currentWriterIndexRecord.getFilePath()); + } } private void logEvent(BufferedReader br) throws Exception { - String line; - int currLine = 0; - int startLine = currentConsumerIndexRecord.getLinePosition(); - List events = new ArrayList<>(); - while ((line = br.readLine()) != null) { + int currLine = 0; + int startLine = currentConsumerIndexRecord.getLinePosition(); + List events = new ArrayList<>(); + + for (String line = br.readLine(); line != null; line = br.readLine()) { currLine++; + if (currLine < startLine) { continue; } + AuditEventBase event = MiscUtil.fromJson(line, AuthzAuditEvent.class); + events.add(event); if (events.size() == bufferSize) { - boolean ret = sendEvent(events, - currentConsumerIndexRecord, currLine); + boolean ret = sendEvent(events, currentConsumerIndexRecord, currLine); + if (!ret) { throw new Exception("Destination down"); } + events.clear(); } } - if (events.size() > 0) { - boolean ret = sendEvent(events, - currentConsumerIndexRecord, currLine); + + if (!events.isEmpty()) { + boolean ret = sendEvent(events, currentConsumerIndexRecord, currLine); + if (!ret) { throw new Exception("Destination down"); } + events.clear(); } } - private boolean sendEvent(List events, AuditIndexRecord indexRecord, - int currLine) { + private boolean sendEvent(List events, AuditIndexRecord indexRecord, int currLine) { boolean ret = true; + try { ret = 
consumerProvider.log(events); + if (!ret) { // Need to log error after fixed interval - logError("Error sending logs to consumer. provider=" - + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()); + logError("Error sending logs to consumer. provider=" + FILE_QUEUE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName()); } else { // Update index and save indexRecord.setLinePosition(currLine); indexRecord.setStatus(SPOOL_FILE_STATUS.read_inprogress); indexRecord.setLastSuccessTime(new Date()); indexRecord.setLastAttempt(true); + saveIndexFile(); if (isDestDown) { isDestDown = false; - logger.info("Destination up now. " + indexRecord.getFilePath() - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + + logger.info("Destination up now. {}, queueName={}, consumer={}", indexRecord.getFilePath(), FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); } } } catch (Throwable t) { - logger.error("Error while sending logs to consumer. provider=" - + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName() + ", log=" + events, t); + logger.error("Error while sending logs to consumer. 
provider={}, consumer={}, log={}", FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName(), events, t); } return ret; } private void logFile(File file) throws Exception { - if (logger.isDebugEnabled()) { - logger.debug("==> AuditFileQueueSpool.logFile()"); - } - int currLine = 0; - int startLine = currentConsumerIndexRecord.getLinePosition(); + logger.debug("==> AuditFileQueueSpool.logFile()"); + + int currLine = 0; + int startLine = currentConsumerIndexRecord.getLinePosition(); if (currLine < startLine) { currLine++; } - boolean ret = sendFile(file,currentConsumerIndexRecord, currLine); + boolean ret = sendFile(file, currentConsumerIndexRecord, currLine); + if (!ret) { throw new Exception("Destination down"); } - if (logger.isDebugEnabled()) { - logger.debug("<== AuditFileQueueSpool.logFile()"); - } + logger.debug("<== AuditFileQueueSpool.logFile()"); } - private boolean sendFile(File file, AuditIndexRecord indexRecord, - int currLine) { + private boolean sendFile(File file, AuditIndexRecord indexRecord, int currLine) { boolean ret = true; - if (logger.isDebugEnabled()) { - logger.debug("==> AuditFileQueueSpool.sendFile()"); - } + + logger.debug("==> AuditFileQueueSpool.sendFile()"); try { ret = consumerProvider.logFile(file); + if (!ret) { // Need to log error after fixed interval - logError("Error sending log file to consumer. provider=" - + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName()+ ", logFile=" + file.getName()); + logError("Error sending log file to consumer. provider=" + FILE_QUEUE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName() + ", logFile=" + file.getName()); } else { // Update index and save indexRecord.setLinePosition(currLine); indexRecord.setStatus(SPOOL_FILE_STATUS.read_inprogress); indexRecord.setLastSuccessTime(new Date()); indexRecord.setLastAttempt(true); + saveIndexFile(); if (isDestDown) { isDestDown = false; - logger.info("Destination up now. 
" + indexRecord.getFilePath() - + ", queueName=" + FILE_QUEUE_PROVIDER_NAME - + ", consumer=" + consumerProvider.getName()); + + logger.info("Destination up now. {}, queueName={}, consumer={}", indexRecord.getFilePath(), FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName()); } } } catch (Throwable t) { - logger.error("Error sending log file to consumer. provider=" - + FILE_QUEUE_PROVIDER_NAME + ", consumer=" - + consumerProvider.getName() + ", logFile=" + file.getName(), t); + logger.error("Error sending log file to consumer. provider={}, consumer={}, logFile={}", FILE_QUEUE_PROVIDER_NAME, consumerProvider.getName(), file.getName(), t); } - if (logger.isDebugEnabled()) { - logger.debug("<== AuditFileQueueSpool.sendFile() " + ret ); - } + logger.debug("<== AuditFileQueueSpool.sendFile() {}", ret); + return ret; } -} \ No newline at end of file +} diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java index 0e550ae8cb..8a1924ba09 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java @@ -19,10 +19,18 @@ package org.apache.ranger.audit.queue; +import org.apache.ranger.audit.model.AuditEventBase; +import org.apache.ranger.audit.model.AuditIndexRecord; +import org.apache.ranger.audit.model.SPOOL_FILE_STATUS; +import org.apache.ranger.audit.provider.AuditHandler; +import org.apache.ranger.audit.provider.MiscUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.MDC; + import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; -import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; @@ -37,757 +45,825 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; -import 
org.apache.ranger.audit.model.AuditEventBase; -import org.apache.ranger.audit.model.AuditIndexRecord; -import org.apache.ranger.audit.model.SPOOL_FILE_STATUS; -import org.apache.ranger.audit.provider.AuditHandler; -import org.apache.ranger.audit.provider.MiscUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.slf4j.MDC; - /** * This class temporarily stores logs in file system if the destination is * overloaded or down */ public class AuditFileSpool implements Runnable { - private static final Logger logger = LoggerFactory.getLogger(AuditFileSpool.class); - - public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir"; - public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format"; - public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir"; - public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files"; - public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix"; - public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec"; - public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename"; - public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms"; - public static final String CONSUMER = ", consumer="; - - AuditQueue queueProvider = null; - AuditHandler consumerProvider = null; - - BlockingQueue indexQueue = new LinkedBlockingQueue<>(); - - // Folder and File attributes - File logFolder = null; - String logFileNameFormat = null; - File archiveFolder = null; - String fileNamePrefix = null; - String indexFileName = null; - File indexFile = null; - String indexDoneFileName = null; - File indexDoneFile = null; - int retryDestinationMS = 30 * 1000; // Default 30 seconds - int fileRolloverSec = 24 * 60 * 60; // In seconds - int maxArchiveFiles = 100; - - int errorLogIntervalMS = 30 * 1000; // Every 30 seconds - long lastErrorLogMS = 0; 
- - List indexRecords = new ArrayList<>(); - - boolean isPending = false; - long lastAttemptTime = 0; - boolean initDone = false; - - PrintWriter logWriter = null; - AuditIndexRecord currentWriterIndexRecord = null; - AuditIndexRecord currentConsumerIndexRecord = null; - - BufferedReader logReader = null; - - Thread destinationThread = null; - - boolean isWriting = true; - boolean isDrain = false; - boolean isDestDown = false; - - public AuditFileSpool(AuditQueue queueProvider, - AuditHandler consumerProvider) { - this.queueProvider = queueProvider; - this.consumerProvider = consumerProvider; - } - - public void init(Properties prop) { - init(prop, null); - } - - public boolean init(Properties props, String basePropertyName) { - if (initDone) { - logger.error("init() called more than once. queueProvider={}, consumerProvider={}", queueProvider.getName(), consumerProvider.getName()); - return true; - } - String propPrefix = "xasecure.audit.filespool"; - if (basePropertyName != null) { - propPrefix = basePropertyName; - } - - try { - // Initial folder and file properties - String logFolderProp = MiscUtil.getStringProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_LOCAL_DIR); - logFileNameFormat = MiscUtil.getStringProperty(props, - basePropertyName + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME); - String archiveFolderProp = MiscUtil.getStringProperty(props, - propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR); - fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_FILENAME_PREFIX); - indexFileName = MiscUtil.getStringProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_INDEX_FILE); - retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS); - fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec); - maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "." 
- + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles); - - logger.info("retryDestinationMS={}, queueName={}", retryDestinationMS, queueProvider.getName()); - logger.info("fileRolloverSec={}, queueName={}", fileRolloverSec, queueProvider.getName()); - logger.info("maxArchiveFiles={}, queueName={}", maxArchiveFiles, queueProvider.getName()); - - if (logFolderProp == null || logFolderProp.isEmpty()) { - logger.error("Audit spool folder is not configured. Please set {}.{}. queueName={}", propPrefix, PROP_FILE_SPOOL_LOCAL_DIR, queueProvider.getName()); - return false; - } - logFolder = new File(logFolderProp); - if (!logFolder.isDirectory()) { - logFolder.mkdirs(); - if (!logFolder.isDirectory()) { - logger.error("File Spool folder not found and can't be created. folder={}, queueName={}", logFolder.getAbsolutePath(), queueProvider.getName()); - return false; - } - } - logger.info("logFolder={}, queueName={}", logFolder, queueProvider.getName()); - - if (logFileNameFormat == null || logFileNameFormat.isEmpty()) { - logFileNameFormat = "spool_" + "%app-type%" + "_" - + "%time:yyyyMMdd-HHmm.ss%.log"; - } - logger.info("logFileNameFormat={}, queueName={}", logFileNameFormat, queueProvider.getName()); - - if (archiveFolderProp == null || archiveFolderProp.isEmpty()) { - archiveFolder = new File(logFolder, "archive"); - } else { - archiveFolder = new File(archiveFolderProp); - } - if (!archiveFolder.isDirectory()) { - archiveFolder.mkdirs(); - if (!archiveFolder.isDirectory()) { - logger.error("File Spool archive folder not found and can't be created. 
folder={}, queueName={}", archiveFolder.getAbsolutePath(), queueProvider.getName()); - return false; - } - } - logger.info("archiveFolder={}, queueName={}", archiveFolder, queueProvider.getName()); - - if (indexFileName == null || indexFileName.isEmpty()) { - if (fileNamePrefix == null || fileNamePrefix.isEmpty()) { - fileNamePrefix = queueProvider.getName() + "_" - + consumerProvider.getName(); - } - indexFileName = "index_" + fileNamePrefix + "_" + "%app-type%" - + ".json"; - indexFileName = MiscUtil.replaceTokens(indexFileName, - System.currentTimeMillis()); - } - - indexFile = new File(logFolder, indexFileName); - if (!indexFile.exists()) { - boolean ret = indexFile.createNewFile(); - if (!ret) { - logger.error("Error creating index file. fileName={}", indexDoneFile.getPath()); - return false; - } - } - logger.info("indexFile={}, queueName={}", indexFile, queueProvider.getName()); - - int lastDot = indexFileName.lastIndexOf('.'); - if (lastDot < 0) { - lastDot = indexFileName.length() - 1; - } - indexDoneFileName = indexFileName.substring(0, lastDot) - + "_closed.json"; - indexDoneFile = new File(logFolder, indexDoneFileName); - if (!indexDoneFile.exists()) { - boolean ret = indexDoneFile.createNewFile(); - if (!ret) { - logger.error("Error creating index done file. 
fileName={}", indexDoneFile.getPath()); - return false; - } - } - logger.info("indexDoneFile={}, queueName={}", indexDoneFile, queueProvider.getName()); - - // Load index file - loadIndexFile(); - for (AuditIndexRecord auditIndexRecord : indexRecords) { - if (!auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.done)) { - isPending = true; - } - if (auditIndexRecord.getStatus() - .equals(SPOOL_FILE_STATUS.write_inprogress)) { - currentWriterIndexRecord = auditIndexRecord; - logger.info("currentWriterIndexRecord={}, queueName={}", currentWriterIndexRecord.getFilePath(), queueProvider.getName()); - } - if (auditIndexRecord.getStatus() - .equals(SPOOL_FILE_STATUS.read_inprogress)) { - indexQueue.add(auditIndexRecord); - } - } - printIndex(); - for (AuditIndexRecord auditIndexRecord : indexRecords) { - if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.pending)) { - File consumerFile = new File(auditIndexRecord.getFilePath()); - if (!consumerFile.exists()) { - logger.error("INIT: Consumer file={} not found.", consumerFile.getPath()); - } else { - indexQueue.add(auditIndexRecord); - } - } - } - - } catch (Throwable t) { - logger.error("Error initializing File Spooler. queue=" - + queueProvider.getName(), t); - return false; - } - initDone = true; - return true; - } - - /** - * Start looking for outstanding logs and update status according. - */ - public void start() { - if (!initDone) { - logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName={}", queueProvider.getName()); - return; - } - - logger.info("Starting writerThread, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); - - // Let's start the thread to read - destinationThread = new Thread(this, queueProvider.getName() + "_" - + consumerProvider.getName() + "_destWriter"); - destinationThread.setDaemon(true); - destinationThread.start(); - } - - public void stop() { - if (!initDone) { - logger.error("Cannot stop Audit File Spooler. 
Initilization not done. queueName={}", queueProvider.getName()); - return; - } - logger.info("Stop called, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); - - isDrain = true; - flush(); - - PrintWriter out = getOpenLogFileStream(); - if (out != null) { - // If write is still going on, then let's give it enough time to - // complete - for (int i = 0; i < 3; i++) { - if (isWriting) { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - // ignore - } - continue; - } - try { - logger.info("Closing open file, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); - - out.flush(); - out.close(); - break; - } catch (Throwable t) { - logger.debug("Error closing spool out file.", t); - } - } - } - try { - if (destinationThread != null) { - destinationThread.interrupt(); - } - destinationThread = null; - } catch (Exception e) { - // ignore - } - } - - public void flush() { - if (!initDone) { - logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName={}", queueProvider.getName()); - return; - } - PrintWriter out = getOpenLogFileStream(); - if (out != null) { - out.flush(); - } - } - - /** - * If any files are still not processed. Also, if the destination is not - * reachable - * - * @return - */ - public boolean isPending() { - if (!initDone) { - logError("isPending(): File Spooler not initialized. queueName={}", queueProvider.getName()); - return false; - } - - return isPending; - } - - /** - * Milliseconds from last attempt time - * - * @return - */ - public long getLastAttemptTimeDelta() { - if (lastAttemptTime == 0) { - return 0; - } - return System.currentTimeMillis() - lastAttemptTime; - } - - public synchronized void stashLogs(AuditEventBase event) { - if (isDrain) { - // Stop has been called, so this method shouldn't be called - logger.error("stashLogs() is called after stop is called. 
event={}", event); - return; - } - try { - isWriting = true; - PrintWriter logOut = getLogFileStream(); - // Convert event to json - String jsonStr = MiscUtil.stringify(event); - logOut.println(jsonStr); - isPending = true; - } catch (Exception ex) { - logger.error("Error writing to file. event={}", event, ex); - } finally { - isWriting = false; - } - - } - - public synchronized void stashLogs(Collection events) { - for (AuditEventBase event : events) { - stashLogs(event); - } - flush(); - } - - public synchronized void stashLogsString(String event) { - if (isDrain) { - // Stop has been called, so this method shouldn't be called - logger.error("stashLogs() is called after stop is called. event={}", event); - return; - } - try { - isWriting = true; - PrintWriter logOut = getLogFileStream(); - logOut.println(event); - } catch (Exception ex) { - logger.error("Error writing to file. event={}", event, ex); - } finally { - isWriting = false; - } - - } - - public synchronized void stashLogsString(Collection events) { - for (String event : events) { - stashLogsString(event); - } - flush(); - } - - /** - * This return the current file. 
If there are not current open output file, - * then it will return null - * - * @return - * @throws Exception - */ - private synchronized PrintWriter getOpenLogFileStream() { - return logWriter; - } - - /** - * @return - * @throws Exception - */ - private synchronized PrintWriter getLogFileStream() throws Exception { - closeFileIfNeeded(); - - // Either there are no open log file or the previous one has been rolled - // over - if (currentWriterIndexRecord == null) { - Date currentTime = new Date(); - // Create a new file - String fileName = MiscUtil.replaceTokens(logFileNameFormat, - currentTime.getTime()); - String newFileName = fileName; - File outLogFile = null; - int i = 0; - while (true) { - outLogFile = new File(logFolder, newFileName); - File archiveLogFile = new File(archiveFolder, newFileName); - if (!outLogFile.exists() && !archiveLogFile.exists()) { - break; - } - i++; - int lastDot = fileName.lastIndexOf('.'); - String baseName = fileName.substring(0, lastDot); - String extension = fileName.substring(lastDot); - newFileName = baseName + "." + i + extension; - } - fileName = newFileName; - logger.info("Creating new file. queueName={}, filename={}",queueProvider.getName(), fileName); - // Open the file - logWriter = new PrintWriter(new BufferedWriter(new FileWriter( - outLogFile))); - - AuditIndexRecord tmpIndexRecord = new AuditIndexRecord(); - - tmpIndexRecord.setId(MiscUtil.generateUniqueId()); - tmpIndexRecord.setFilePath(outLogFile.getPath()); - tmpIndexRecord.setStatus(SPOOL_FILE_STATUS.write_inprogress); - tmpIndexRecord.setFileCreateTime(currentTime); - tmpIndexRecord.setLastAttempt(true); - currentWriterIndexRecord = tmpIndexRecord; - indexRecords.add(currentWriterIndexRecord); - saveIndexFile(); - - } else { - if (logWriter == null) { - // This means the process just started. We need to open the file - // in append mode. - logger.info("Opening existing file for append. 
queueName={}, filename={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath()); - logWriter = new PrintWriter(new BufferedWriter(new FileWriter( - currentWriterIndexRecord.getFilePath(), true))); - } - } - return logWriter; - } - - private synchronized void closeFileIfNeeded() throws IOException { - // Is there file open to write or there are no pending file, then close - // the active file - if (currentWriterIndexRecord != null) { - // Check whether the file needs to rolled - boolean closeFile = false; - if (indexRecords.size() == 1) { - closeFile = true; - logger.info("Closing file. Only one open file. queueName=" - + queueProvider.getName() + ", fileName=" - + currentWriterIndexRecord.getFilePath()); - } else if (System.currentTimeMillis() - - currentWriterIndexRecord.getFileCreateTime().getTime() > fileRolloverSec * 1000) { - closeFile = true; - logger.info("Closing file. Only one open file. queueName={}, filename={}",queueProvider.getName(), currentWriterIndexRecord.getFilePath()); - } - if (closeFile) { - // Roll the file - if (logWriter != null) { - logWriter.flush(); - logWriter.close(); - logWriter = null; - } - currentWriterIndexRecord.setStatus(SPOOL_FILE_STATUS.pending); - currentWriterIndexRecord.setWriteCompleteTime( new Date()); - saveIndexFile(); - logger.info("Adding file to queue. queueName={}, filename={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath()); - indexQueue.add(currentWriterIndexRecord); - currentWriterIndexRecord = null; - } - } - } - - /** - * Load the index file - * - * @throws IOException - */ - void loadIndexFile() throws IOException { - logger.info("Loading index file. 
fileName={}", indexFile.getPath()); - try (BufferedReader br = new BufferedReader(new FileReader(indexFile))) { - indexRecords.clear(); - String line; - while ((line = br.readLine()) != null) { - if (!line.isEmpty() && !line.startsWith("#")) { - AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class); - indexRecords.add(record); - } - } - } - } - - synchronized void printIndex() { - logger.info("INDEX printIndex() ==== START"); - Iterator iter = indexRecords.iterator(); - while (iter.hasNext()) { - AuditIndexRecord record = iter.next(); - logger.info("INDEX={}, isFileExist={}", record, (new File(record.getFilePath()).exists())); - } - logger.info("INDEX printIndex() ==== END"); - } - - synchronized void removeIndexRecord(AuditIndexRecord indexRecord) - throws FileNotFoundException, IOException { - Iterator iter = indexRecords.iterator(); - while (iter.hasNext()) { - AuditIndexRecord record = iter.next(); - if (record.getId().equals(indexRecord.getId())) { - logger.info("Removing file from index. file={}, queueName={}, consumer={}", record.getFilePath(), queueProvider.getName(), consumerProvider.getName()); - - iter.remove(); - appendToDoneFile(record); - } - } - saveIndexFile(); - // If there are no more files in the index, then let's assume the - // destination is now available - if (indexRecords.isEmpty()) { - isPending = false; - } - } - - synchronized void saveIndexFile() throws IOException { - try (PrintWriter out = new PrintWriter(indexFile)) { - for (AuditIndexRecord auditIndexRecord : indexRecords) { - out.println(MiscUtil.stringify(auditIndexRecord)); - } - } - } - - void appendToDoneFile(AuditIndexRecord indexRecord) - throws IOException { - logger.info("Moving to done file. 
{}, queueName={}, consumer={}", indexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName()); - String line = MiscUtil.stringify(indexRecord); - PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter( - indexDoneFile, true))); - out.println(line); - out.flush(); - out.close(); - - // Move to archive folder - File logFile = null; - File archiveFile = null; - try { - logFile = new File(indexRecord.getFilePath()); - String fileName = logFile.getName(); - archiveFile = new File(archiveFolder, fileName); - logger.info("Moving logFile{} to {}", logFile, archiveFile); - boolean filedRenamed = logFile.renameTo(archiveFile); - if(logger.isDebugEnabled()) { - logger.debug("logFile renamed to archiveFile {}{}", archiveFile, filedRenamed ); - } - } catch (Exception t) { - logger.error("Error moving log file to archive folder. logFile={}, archiveFile={}", logFile, archiveFile, t); - } - - archiveFile = null; - try { - // Remove old files - File[] logFiles = archiveFolder.listFiles(pathname -> pathname.getName().toLowerCase().endsWith(".log")); - - if (logFiles != null && logFiles.length > maxArchiveFiles) { - int filesToDelete = logFiles.length - maxArchiveFiles; - try (BufferedReader br = new BufferedReader(new FileReader(indexDoneFile))) { - int filesDeletedCount = 0; - while ((line = br.readLine()) != null) { - if (!line.isEmpty() && !line.startsWith("#")) { - try { - AuditIndexRecord record = MiscUtil.fromJson(line, - AuditIndexRecord.class); - logFile = new File(record.getFilePath()); - String fileName = logFile.getName(); - archiveFile = new File(archiveFolder, fileName); - if (archiveFile.exists()) { - logger.info("Deleting archive file {}", archiveFile); - boolean ret = archiveFile.delete(); - if (!ret) { - logger.error("Error deleting archive file. 
archiveFile={}", archiveFile); - } - filesDeletedCount++; - if (filesDeletedCount >= filesToDelete) { - logger.info("Deleted {} files", filesDeletedCount); - break; - } - } - } catch (Exception e) { - logger.error("Error parsing following JSON: "+line, e); - } - } - } - } - } - } catch (Exception t) { - logger.error("Error deleting older archive file. archiveFile=" - + archiveFile, t); - } - - } - - void logError(String msg, Object... arguments) { - long currTimeMS = System.currentTimeMillis(); - if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { - logger.error(msg, arguments); - lastErrorLogMS = currTimeMS; - } - } - - class AuditFileSpoolAttempt { - Date attemptTime; - String status; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Runnable#run() - */ - @Override - public void run() { - try { - //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox - MDC.clear(); - runLogAudit(); - } catch (Exception t) { - logger.error("Exited thread without abnormaly. queue=" - + consumerProvider.getName(), t); - } - } - - public void runLogAudit() { - while (true) { - try { - if (isDestDown) { - logger.info("Destination is down. sleeping for {} milli seconds. 
indexQueue={}, queueName={}, consumer={}", - retryDestinationMS, indexQueue.size(), queueProvider.getName(), consumerProvider.getName()); - Thread.sleep(retryDestinationMS); - } - - // Let's pause between each iteration - if (currentConsumerIndexRecord == null) { - currentConsumerIndexRecord = indexQueue.poll( - retryDestinationMS, TimeUnit.MILLISECONDS); - } else { - Thread.sleep(retryDestinationMS); - } - - if (isDrain) { - // Need to exit - break; - } - if (currentConsumerIndexRecord == null) { - closeFileIfNeeded(); - continue; - } - - boolean isRemoveIndex = false; - File consumerFile = new File( - currentConsumerIndexRecord.getFilePath()); - if (!consumerFile.exists()) { - logger.error("Consumer file={} not found.", consumerFile.getPath()); - printIndex(); - isRemoveIndex = true; - } else { - // Let's open the file to write - try (BufferedReader br = new BufferedReader(new FileReader(currentConsumerIndexRecord.getFilePath()))) { - int startLine = currentConsumerIndexRecord.getLinePosition(); - String line; - int currLine = 0; - List lines = new ArrayList<>(); - while ((line = br.readLine()) != null) { - currLine++; - if (currLine < startLine) { - continue; - } - lines.add(line); - if (lines.size() == queueProvider.getMaxBatchSize()) { - boolean ret = sendEvent(lines, - currentConsumerIndexRecord, currLine); - if (!ret) { - throw new Exception("Destination down"); - } - lines.clear(); - } - } - if (!lines.isEmpty()) { - boolean ret = sendEvent(lines, - currentConsumerIndexRecord, currLine); - if (!ret) { - throw new Exception("Destination down"); - } - lines.clear(); - } - logger.info("Done reading file. 
file={}, queueName={}, consumer={}", currentConsumerIndexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName()); - // The entire file is read - currentConsumerIndexRecord.setStatus(SPOOL_FILE_STATUS.done); - currentConsumerIndexRecord.setDoneCompleteTime(new Date()); - currentConsumerIndexRecord.setLastAttempt(true); - - isRemoveIndex = true; - } catch (Exception ex) { - isDestDown = true; - logError("Destination down. queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); - lastAttemptTime = System.currentTimeMillis(); - // Update the index file - currentConsumerIndexRecord.setLastFailedTime(new Date()); - currentConsumerIndexRecord.setFailedAttemptCount(currentConsumerIndexRecord.getFailedAttemptCount()+1); - currentConsumerIndexRecord.setLastAttempt(false); - saveIndexFile(); - } - } - if (isRemoveIndex) { - // Remove this entry from index - removeIndexRecord(currentConsumerIndexRecord); - currentConsumerIndexRecord = null; - closeFileIfNeeded(); - } - } catch (InterruptedException e) { - logger.info("Caught exception in consumer thread. Shutdown might be in progress"); - break; - } catch (Exception t) { - logger.error("Exception in destination writing thread.", t); - } - } - logger.info("Exiting file spooler. provider={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); - } - - private boolean sendEvent(List lines, AuditIndexRecord indexRecord, - int currLine) { - boolean ret = true; - try { - ret = consumerProvider.logJSON(lines); - if (!ret) { - // Need to log error after fixed interval - logError("Error sending logs to consumer. 
provider={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); - } else { - // Update index and save - indexRecord.setLinePosition(currLine); - indexRecord.setStatus(SPOOL_FILE_STATUS.read_inprogress); - indexRecord.setLastSuccessTime(new Date()); - indexRecord.setLastAttempt(true); - saveIndexFile(); - - if (isDestDown) { - isDestDown = false; - logger.info("Destination up now. {}, queueName={}, consumer={}", indexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName()); - } - } - } catch (Exception t) { - logger.error("Error while sending logs to consumer. provider={}, consumer={}, logEventCount={}", queueProvider.getName(), consumerProvider.getName(), lines.size(), t); - } - - return ret; - } + private static final Logger logger = LoggerFactory.getLogger(AuditFileSpool.class); + + public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir"; + public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format"; + public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir"; + public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files"; + public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix"; + public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec"; + public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename"; + public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms"; + public static final String CONSUMER = ", consumer="; + + AuditQueue queueProvider; + AuditHandler consumerProvider; + BlockingQueue indexQueue = new LinkedBlockingQueue<>(); + + // Folder and File attributes + File logFolder; + String logFileNameFormat; + File archiveFolder; + String fileNamePrefix; + String indexFileName; + File indexFile; + String indexDoneFileName; + File indexDoneFile; + int retryDestinationMS = 30 * 1000; // Default 30 seconds + 
int fileRolloverSec = 24 * 60 * 60; // In seconds + int maxArchiveFiles = 100; + + int errorLogIntervalMS = 30 * 1000; // Every 30 seconds + long lastErrorLogMS; + + List indexRecords = new ArrayList<>(); + + boolean isPending; + long lastAttemptTime; + boolean initDone; + + PrintWriter logWriter; + AuditIndexRecord currentWriterIndexRecord; + AuditIndexRecord currentConsumerIndexRecord; + + BufferedReader logReader; + + Thread destinationThread; + + boolean isWriting = true; + boolean isDrain; + boolean isDestDown; + + public AuditFileSpool(AuditQueue queueProvider, AuditHandler consumerProvider) { + this.queueProvider = queueProvider; + this.consumerProvider = consumerProvider; + } + + public void init(Properties prop) { + init(prop, null); + } + + public boolean init(Properties props, String basePropertyName) { + if (initDone) { + logger.error("init() called more than once. queueProvider={}, consumerProvider={}", queueProvider.getName(), consumerProvider.getName()); + return true; + } + + String propPrefix = "xasecure.audit.filespool"; + + if (basePropertyName != null) { + propPrefix = basePropertyName; + } + + try { + // Initial folder and file properties + String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_LOCAL_DIR); + String archiveFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR); + + logFileNameFormat = MiscUtil.getStringProperty(props, basePropertyName + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME); + fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_FILENAME_PREFIX); + indexFileName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILE_SPOOL_INDEX_FILE); + retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS); + fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." 
+ PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec); + maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles); + + logger.info("retryDestinationMS={}, queueName={}", retryDestinationMS, queueProvider.getName()); + logger.info("fileRolloverSec={}, queueName={}", fileRolloverSec, queueProvider.getName()); + logger.info("maxArchiveFiles={}, queueName={}", maxArchiveFiles, queueProvider.getName()); + + if (logFolderProp == null || logFolderProp.isEmpty()) { + logger.error("Audit spool folder is not configured. Please set {}.{}.queueName={}", propPrefix, PROP_FILE_SPOOL_LOCAL_DIR, queueProvider.getName()); + + return false; + } + + logFolder = new File(logFolderProp); + + if (!logFolder.isDirectory()) { + logFolder.mkdirs(); + + if (!logFolder.isDirectory()) { + logger.error("File Spool folder not found and can't be created. folder={}, queueName={}", logFolder.getAbsolutePath(), queueProvider.getName()); + + return false; + } + } + + logger.info("logFolder={}, queueName={}", logFolder, queueProvider.getName()); + + if (logFileNameFormat == null || logFileNameFormat.isEmpty()) { + logFileNameFormat = "spool_" + "%app-type%" + "_" + "%time:yyyyMMdd-HHmm.ss%.log"; + } + + logger.info("logFileNameFormat={}, queueName={}", logFileNameFormat, queueProvider.getName()); + + if (archiveFolderProp == null || archiveFolderProp.isEmpty()) { + archiveFolder = new File(logFolder, "archive"); + } else { + archiveFolder = new File(archiveFolderProp); + } + + if (!archiveFolder.isDirectory()) { + archiveFolder.mkdirs(); + + if (!archiveFolder.isDirectory()) { + logger.error("File Spool archive folder not found and can't be created. 
folder={}, queueName={}", archiveFolder.getAbsolutePath(), queueProvider.getName()); + + return false; + } + } + + logger.info("archiveFolder={}, queueName={}", archiveFolder, queueProvider.getName()); + + if (indexFileName == null || indexFileName.isEmpty()) { + if (fileNamePrefix == null || fileNamePrefix.isEmpty()) { + fileNamePrefix = queueProvider.getName() + "_" + consumerProvider.getName(); + } + + indexFileName = "index_" + fileNamePrefix + "_" + "%app-type%" + ".json"; + indexFileName = MiscUtil.replaceTokens(indexFileName, System.currentTimeMillis()); + } + + indexFile = new File(logFolder, indexFileName); + + if (!indexFile.exists()) { + boolean ret = indexFile.createNewFile(); + + if (!ret) { + logger.error("Error creating index file. fileName={}", indexDoneFile.getPath()); + + return false; + } + } + + logger.info("indexFile={}, queueName={}", indexFile, queueProvider.getName()); + + int lastDot = indexFileName.lastIndexOf('.'); + + if (lastDot < 0) { + lastDot = indexFileName.length() - 1; + } + + indexDoneFileName = indexFileName.substring(0, lastDot) + "_closed.json"; + indexDoneFile = new File(logFolder, indexDoneFileName); + + if (!indexDoneFile.exists()) { + boolean ret = indexDoneFile.createNewFile(); + + if (!ret) { + logger.error("Error creating index done file. 
fileName={}", indexDoneFile.getPath()); + + return false; + } + } + + logger.info("indexDoneFile={}, queueName={}", indexDoneFile, queueProvider.getName()); + + // Load index file + loadIndexFile(); + + for (AuditIndexRecord auditIndexRecord : indexRecords) { + if (!auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.done)) { + isPending = true; + } + + if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.write_inprogress)) { + currentWriterIndexRecord = auditIndexRecord; + + logger.info("currentWriterIndexRecord={}, queueName={}", currentWriterIndexRecord.getFilePath(), queueProvider.getName()); + } + + if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.read_inprogress)) { + indexQueue.add(auditIndexRecord); + } + } + + printIndex(); + + for (AuditIndexRecord auditIndexRecord : indexRecords) { + if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.pending)) { + File consumerFile = new File(auditIndexRecord.getFilePath()); + + if (!consumerFile.exists()) { + logger.error("INIT: Consumer file={} not found.", consumerFile.getPath()); + } else { + indexQueue.add(auditIndexRecord); + } + } + } + } catch (Throwable t) { + logger.error("Error initializing File Spooler. queue={}", queueProvider.getName(), t); + + return false; + } + + initDone = true; + + return true; + } + + /** + * Start looking for outstanding logs and update status according. + */ + public void start() { + if (!initDone) { + logger.error("Cannot start Audit File Spooler. Initilization not done yet. 
queueName={}", queueProvider.getName()); + return; + } + + logger.info("Starting writerThread, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); + + // Let's start the thread to read + destinationThread = new Thread(this, queueProvider.getName() + "_" + consumerProvider.getName() + "_destWriter"); + + destinationThread.setDaemon(true); + destinationThread.start(); + } + + public void stop() { + if (!initDone) { + logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName={}", queueProvider.getName()); + return; + } + + logger.info("Stop called, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); + + isDrain = true; + + flush(); + + PrintWriter out = getOpenLogFileStream(); + + if (out != null) { + // If write is still going on, then let's give it enough time to complete + for (int i = 0; i < 3; i++) { + if (isWriting) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + // ignore + } + continue; + } + + try { + logger.info("Closing open file, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); + + out.flush(); + out.close(); + + break; + } catch (Throwable t) { + logger.debug("Error closing spool out file.", t); + } + } + } + + try { + if (destinationThread != null) { + destinationThread.interrupt(); + } + + destinationThread = null; + } catch (Exception e) { + // ignore + } + } + + public void flush() { + if (!initDone) { + logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName={}", queueProvider.getName()); + return; + } + + PrintWriter out = getOpenLogFileStream(); + + if (out != null) { + out.flush(); + } + } + + /** + * If any files are still not processed. Also, if the destination is not + * reachable + * + * @return + */ + public boolean isPending() { + if (!initDone) { + logError("isPending(): File Spooler not initialized. 
queueName={}", queueProvider.getName()); + + return false; + } + + return isPending; + } + + /** + * Milliseconds from last attempt time + * + * @return + */ + public long getLastAttemptTimeDelta() { + if (lastAttemptTime == 0) { + return 0; + } + + return System.currentTimeMillis() - lastAttemptTime; + } + + public synchronized void stashLogs(AuditEventBase event) { + if (isDrain) { + // Stop has been called, so this method shouldn't be called + logger.error("stashLogs() is called after stop is called. event={}", event); + + return; + } + + try { + isWriting = true; + + PrintWriter logOut = getLogFileStream(); + String jsonStr = MiscUtil.stringify(event); // Convert event to json + + logOut.println(jsonStr); + + isPending = true; + } catch (Exception ex) { + logger.error("Error writing to file. event={}", event, ex); + } finally { + isWriting = false; + } + } + + public synchronized void stashLogs(Collection events) { + for (AuditEventBase event : events) { + stashLogs(event); + } + + flush(); + } + + public synchronized void stashLogsString(String event) { + if (isDrain) { + // Stop has been called, so this method shouldn't be called + logger.error("stashLogs() is called after stop is called. event={}", event); + + return; + } + try { + isWriting = true; + + PrintWriter logOut = getLogFileStream(); + + logOut.println(event); + } catch (Exception ex) { + logger.error("Error writing to file. event={}", event, ex); + } finally { + isWriting = false; + } + } + + public synchronized void stashLogsString(Collection events) { + for (String event : events) { + stashLogsString(event); + } + + flush(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Runnable#run() + */ + @Override + public void run() { + try { + //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox + MDC.clear(); + runLogAudit(); + } catch (Exception t) { + logger.error("Exited thread without abnormaly. 
queue={}", consumerProvider.getName(), t); + } + } + + public void runLogAudit() { + while (true) { + try { + if (isDestDown) { + logger.info("Destination is down. sleeping for {} milli seconds. indexQueue={}, queueName={}, consumer={}", retryDestinationMS, indexQueue.size(), queueProvider.getName(), consumerProvider.getName()); + + Thread.sleep(retryDestinationMS); + } + + // Let's pause between each iteration + if (currentConsumerIndexRecord == null) { + currentConsumerIndexRecord = indexQueue.poll(retryDestinationMS, TimeUnit.MILLISECONDS); + } else { + Thread.sleep(retryDestinationMS); + } + + if (isDrain) { + // Need to exit + break; + } + + if (currentConsumerIndexRecord == null) { + closeFileIfNeeded(); + + continue; + } + + boolean isRemoveIndex = false; + File consumerFile = new File(currentConsumerIndexRecord.getFilePath()); + + if (!consumerFile.exists()) { + logger.error("Consumer file={} not found.", consumerFile.getPath()); + + printIndex(); + + isRemoveIndex = true; + } else { + // Let's open the file to write + try (BufferedReader br = new BufferedReader(new FileReader(currentConsumerIndexRecord.getFilePath()))) { + int startLine = currentConsumerIndexRecord.getLinePosition(); + int currLine = 0; + List lines = new ArrayList<>(); + + for (String line = br.readLine(); line != null; line = br.readLine()) { + currLine++; + + if (currLine < startLine) { + continue; + } + + lines.add(line); + + if (lines.size() == queueProvider.getMaxBatchSize()) { + boolean ret = sendEvent(lines, currentConsumerIndexRecord, currLine); + + if (!ret) { + throw new Exception("Destination down"); + } + + lines.clear(); + } + } + + if (!lines.isEmpty()) { + boolean ret = sendEvent(lines, currentConsumerIndexRecord, currLine); + + if (!ret) { + throw new Exception("Destination down"); + } + + lines.clear(); + } + + logger.info("Done reading file. 
file={}, queueName={}, consumer={}", currentConsumerIndexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName()); + + // The entire file is read + currentConsumerIndexRecord.setStatus(SPOOL_FILE_STATUS.done); + currentConsumerIndexRecord.setDoneCompleteTime(new Date()); + currentConsumerIndexRecord.setLastAttempt(true); + + isRemoveIndex = true; + } catch (Exception ex) { + isDestDown = true; + + logError("Destination down. queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); + + lastAttemptTime = System.currentTimeMillis(); + + // Update the index file + currentConsumerIndexRecord.setLastFailedTime(new Date()); + currentConsumerIndexRecord.setFailedAttemptCount(currentConsumerIndexRecord.getFailedAttemptCount() + 1); + currentConsumerIndexRecord.setLastAttempt(false); + + saveIndexFile(); + } + } + + if (isRemoveIndex) { + // Remove this entry from index + removeIndexRecord(currentConsumerIndexRecord); + + currentConsumerIndexRecord = null; + + closeFileIfNeeded(); + } + } catch (InterruptedException e) { + logger.info("Caught exception in consumer thread. Shutdown might be in progress"); + + break; + } catch (Exception t) { + logger.error("Exception in destination writing thread.", t); + } + } + + logger.info("Exiting file spooler. provider={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); + } + + /** + * Load the index file + * + * @throws IOException + */ + void loadIndexFile() throws IOException { + logger.info("Loading index file. 
fileName={}", indexFile.getPath()); + + try (BufferedReader br = new BufferedReader(new FileReader(indexFile))) { + indexRecords.clear(); + + for (String line = br.readLine(); line != null; line = br.readLine()) { + if (!line.isEmpty() && !line.startsWith("#")) { + AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class); + + indexRecords.add(record); + } + } + } + } + + synchronized void printIndex() { + logger.info("INDEX printIndex() ==== START"); + + for (AuditIndexRecord record : indexRecords) { + logger.info("INDEX={}, isFileExist={}", record, (new File(record.getFilePath()).exists())); + } + + logger.info("INDEX printIndex() ==== END"); + } + + synchronized void removeIndexRecord(AuditIndexRecord indexRecord) throws IOException { + Iterator iter = indexRecords.iterator(); + + while (iter.hasNext()) { + AuditIndexRecord record = iter.next(); + + if (record.getId().equals(indexRecord.getId())) { + logger.info("Removing file from index. file={}, queueName={}, consumer={}", record.getFilePath(), queueProvider.getName(), consumerProvider.getName()); + + iter.remove(); + + appendToDoneFile(record); + } + } + + saveIndexFile(); + + // If there are no more files in the index, then let's assume the + // destination is now available + if (indexRecords.isEmpty()) { + isPending = false; + } + } + + synchronized void saveIndexFile() throws IOException { + try (PrintWriter out = new PrintWriter(indexFile)) { + for (AuditIndexRecord auditIndexRecord : indexRecords) { + out.println(MiscUtil.stringify(auditIndexRecord)); + } + } + } + + void appendToDoneFile(AuditIndexRecord indexRecord) throws IOException { + logger.info("Moving to done file. 
{}, queueName={}, consumer={}", indexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName()); + + try (PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(indexDoneFile, true)))) { + String line = MiscUtil.stringify(indexRecord); + + out.println(line); + out.flush(); + } + + // Move to archive folder + File logFile = null; + File archiveFile = null; + + try { + logFile = new File(indexRecord.getFilePath()); + archiveFile = new File(archiveFolder, logFile.getName()); + + logger.info("Moving logFile{} to {}", logFile, archiveFile); + + boolean filedRenamed = logFile.renameTo(archiveFile); + + logger.debug("logFile renamed to archiveFile {} {}", archiveFile, filedRenamed); + } catch (Exception t) { + logger.error("Error moving log file to archive folder. logFile={}, archiveFile={}", logFile, archiveFile, t); + } + + archiveFile = null; + + try { + // Remove old files + File[] logFiles = archiveFolder.listFiles(pathname -> pathname.getName().toLowerCase().endsWith(".log")); + + if (logFiles != null && logFiles.length > maxArchiveFiles) { + int filesToDelete = logFiles.length - maxArchiveFiles; + + try (BufferedReader br = new BufferedReader(new FileReader(indexDoneFile))) { + int filesDeletedCount = 0; + + for (String line = br.readLine(); line != null; line = br.readLine()) { + if (!line.isEmpty() && !line.startsWith("#")) { + try { + AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class); + + logFile = new File(record.getFilePath()); + archiveFile = new File(archiveFolder, logFile.getName()); + + if (archiveFile.exists()) { + logger.info("Deleting archive file {}", archiveFile); + + boolean ret = archiveFile.delete(); + + if (!ret) { + logger.error("Error deleting archive file. 
archiveFile={}", archiveFile); + } + + filesDeletedCount++; + + if (filesDeletedCount >= filesToDelete) { + logger.info("Deleted {} files", filesDeletedCount); + + break; + } + } + } catch (Exception e) { + logger.error("Error parsing following JSON: {}", line, e); + } + } + } + } + } + } catch (Exception t) { + logger.error("Error deleting older archive file. archiveFile={}", archiveFile, t); + } + } + + void logError(String msg, Object... arguments) { + long currTimeMS = System.currentTimeMillis(); + + if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) { + logger.error(msg, arguments); + + lastErrorLogMS = currTimeMS; + } + } + + /** + * This return the current file. If there are not current open output file, + * then it will return null + * + * @return + */ + private synchronized PrintWriter getOpenLogFileStream() { + return logWriter; + } + + /** + * @return + * @throws Exception + */ + private synchronized PrintWriter getLogFileStream() throws Exception { + closeFileIfNeeded(); + + // Either there are no open log file or the previous one has been rolled + // over + if (currentWriterIndexRecord == null) { + // Create a new file + Date currentTime = new Date(); + String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime()); + String newFileName = fileName; + File outLogFile; + int i = 0; + + while (true) { + outLogFile = new File(logFolder, newFileName); + + File archiveLogFile = new File(archiveFolder, newFileName); + + if (!outLogFile.exists() && !archiveLogFile.exists()) { + break; + } + + i++; + + int lastDot = fileName.lastIndexOf('.'); + String baseName = fileName.substring(0, lastDot); + String extension = fileName.substring(lastDot); + + newFileName = baseName + "." + i + extension; + } + + fileName = newFileName; + + logger.info("Creating new file. 
queueName={}, filename={}", queueProvider.getName(), fileName); + + // Open the file + logWriter = new PrintWriter(new BufferedWriter(new FileWriter(outLogFile))); + + AuditIndexRecord tmpIndexRecord = new AuditIndexRecord(); + + tmpIndexRecord.setId(MiscUtil.generateUniqueId()); + tmpIndexRecord.setFilePath(outLogFile.getPath()); + tmpIndexRecord.setStatus(SPOOL_FILE_STATUS.write_inprogress); + tmpIndexRecord.setFileCreateTime(currentTime); + tmpIndexRecord.setLastAttempt(true); + + currentWriterIndexRecord = tmpIndexRecord; + + indexRecords.add(currentWriterIndexRecord); + + saveIndexFile(); + } else { + if (logWriter == null) { + // This means the process just started. We need to open the file + // in append mode. + logger.info("Opening existing file for append. queueName={}, filename={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath()); + logWriter = new PrintWriter(new BufferedWriter(new FileWriter(currentWriterIndexRecord.getFilePath(), true))); + } + } + + return logWriter; + } + + private synchronized void closeFileIfNeeded() throws IOException { + // Is there file open to write or there are no pending file, then close the active file + if (currentWriterIndexRecord != null) { + // Check whether the file needs to rolled + boolean closeFile = false; + + if (indexRecords.size() == 1) { + closeFile = true; + + logger.info("Closing file. Only one open file. queueName={}, fileName={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath()); + } else if (System.currentTimeMillis() - currentWriterIndexRecord.getFileCreateTime().getTime() > fileRolloverSec * 1000L) { + closeFile = true; + + logger.info("Closing file. Only one open file. 
queueName={}, filename={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath()); + } + + if (closeFile) { + // Roll the file + if (logWriter != null) { + logWriter.flush(); + logWriter.close(); + + logWriter = null; + } + + currentWriterIndexRecord.setStatus(SPOOL_FILE_STATUS.pending); + currentWriterIndexRecord.setWriteCompleteTime(new Date()); + + saveIndexFile(); + + logger.info("Adding file to queue. queueName={}, filename={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath()); + + indexQueue.add(currentWriterIndexRecord); + + currentWriterIndexRecord = null; + } + } + } + + private boolean sendEvent(List lines, AuditIndexRecord indexRecord, int currLine) { + boolean ret = true; + + try { + ret = consumerProvider.logJSON(lines); + + if (!ret) { + // Need to log error after fixed interval + logError("Error sending logs to consumer. provider={}, consumer={}", queueProvider.getName(), consumerProvider.getName()); + } else { + // Update index and save + indexRecord.setLinePosition(currLine); + indexRecord.setStatus(SPOOL_FILE_STATUS.read_inprogress); + indexRecord.setLastSuccessTime(new Date()); + indexRecord.setLastAttempt(true); + + saveIndexFile(); + + if (isDestDown) { + isDestDown = false; + + logger.info("Destination up now. {}, queueName={}, consumer={}", indexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName()); + } + } + } catch (Exception t) { + logger.error("Error while sending logs to consumer. 
provider={}, consumer={}, logEventCount={}", queueProvider.getName(), consumerProvider.getName(), lines.size(), t); + } + return ret; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java index e2d974121c..e89fedb9ca 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java @@ -19,8 +19,6 @@ package org.apache.ranger.audit.queue; -import java.util.Properties; - import org.apache.ranger.audit.destination.AuditDestination; import org.apache.ranger.audit.provider.AuditHandler; import org.apache.ranger.audit.provider.BaseAuditHandler; @@ -28,203 +26,190 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public abstract class AuditQueue extends BaseAuditHandler { - private static final Logger LOG = LoggerFactory.getLogger(AuditQueue.class); - - public static final int AUDIT_MAX_QUEUE_SIZE_DEFAULT = 1024 * 1024; - public static final int AUDIT_BATCH_INTERVAL_DEFAULT_MS = 3000; - public static final int AUDIT_BATCH_SIZE_DEFAULT = 1000; - - // This is the max time the consumer thread will wait before exiting the - // loop - public static final int AUDIT_CONSUMER_THREAD_WAIT_MS = 5000; - - private int maxQueueSize = AUDIT_MAX_QUEUE_SIZE_DEFAULT; - private int maxBatchInterval = AUDIT_BATCH_INTERVAL_DEFAULT_MS; - private int maxBatchSize = AUDIT_BATCH_SIZE_DEFAULT; - - public static final String PROP_QUEUE = "queue"; - - public static final String PROP_BATCH_SIZE = "batch.size"; - public static final String PROP_QUEUE_SIZE = "queue.size"; - public static final String PROP_BATCH_INTERVAL = "batch.interval.ms"; - - public static final String PROP_FILE_SPOOL_ENABLE = "filespool.enable"; - public static final String PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN = "filespool.drain.full.wait.ms"; - public static final String PROP_FILE_SPOOL_QUEUE_THRESHOLD = 
"filespool.drain.threshold.percent"; - - final protected AuditHandler consumer; - protected AuditFileSpool fileSpooler = null; - - private boolean isDrain = false; - - protected boolean fileSpoolerEnabled = false; - protected int fileSpoolMaxWaitTime = 5 * 60 * 1000; // Default 5 minutes - protected int fileSpoolDrainThresholdPercent = 80; - - boolean isConsumerDestination = false; - // This is set when the first time stop is called. - protected long stopTime = 0; - - /** - * @param consumer - */ - public AuditQueue(AuditHandler consumer) { - this.consumer = consumer; - if (consumer instanceof BaseAuditHandler) { - BaseAuditHandler baseAuditHander = (BaseAuditHandler) consumer; - baseAuditHander.setParentPath(getName()); - } - - if (consumer != null && consumer instanceof AuditDestination) { - // If consumer is destination, then the thread should run as server - // user - isConsumerDestination = true; - } - } - - @Override - public void init(Properties props, String basePropertyName) { - LOG.info("BaseAuditProvider.init()"); - super.init(props, basePropertyName); - - setMaxBatchSize(MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_BATCH_SIZE, getMaxBatchSize())); - setMaxQueueSize(MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_QUEUE_SIZE, getMaxQueueSize())); - setMaxBatchInterval(MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_BATCH_INTERVAL, getMaxBatchInterval())); - - fileSpoolerEnabled = MiscUtil.getBooleanProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_ENABLE, false); - String logFolderProp = MiscUtil.getStringProperty(props, propPrefix - + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR); - if (fileSpoolerEnabled || logFolderProp != null) { - LOG.info("File spool is enabled for " + getName() - + ", logFolderProp=" + logFolderProp + ", " + propPrefix - + "." 
+ AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR + "=" - + fileSpoolerEnabled); - fileSpoolerEnabled = true; - fileSpoolMaxWaitTime = MiscUtil.getIntProperty(props, propPrefix - + "." + PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN, - fileSpoolMaxWaitTime); - fileSpoolDrainThresholdPercent = MiscUtil.getIntProperty(props, - propPrefix + "." + PROP_FILE_SPOOL_QUEUE_THRESHOLD, - fileSpoolDrainThresholdPercent); - fileSpooler = new AuditFileSpool(this, consumer); - if (!fileSpooler.init(props, basePropertyName)) { - fileSpoolerEnabled = false; - LOG.error("Couldn't initialize file spooler. Disabling it. queue=" - + getName() + ", consumer=" + consumer.getName()); - } - } else { - LOG.info("File spool is disabled for " + getName()); - } - - } - - @Override - public void setParentPath(String parentPath) { - super.setParentPath(parentPath); - if (consumer != null && consumer instanceof BaseAuditHandler) { - BaseAuditHandler base = (BaseAuditHandler) consumer; - base.setParentPath(getName()); - } - } - - @Override - public String getFinalPath() { - if (consumer != null) { - if (consumer instanceof BaseAuditHandler) { - return ((BaseAuditHandler) consumer).getFinalPath(); - } else { - return consumer.getName(); - } - } - return getName(); - } - - @Override - public void setName(String name) { - super.setName(name); - if (consumer != null && consumer instanceof BaseAuditHandler) { - BaseAuditHandler base = (BaseAuditHandler) consumer; - base.setParentPath(getName()); - } - } - - public AuditHandler getConsumer() { - return consumer; - } - - public boolean isDrainMaxTimeElapsed() { - return (stopTime - System.currentTimeMillis()) > AUDIT_CONSUMER_THREAD_WAIT_MS; - } - - public boolean isDrain() { - return isDrain; - } - - public void setDrain(boolean isDrain) { - if (isDrain && stopTime != 0) { - stopTime = System.currentTimeMillis(); - } - this.isDrain = isDrain; - } - - public int getMaxQueueSize() { - return maxQueueSize; - } - - public void setMaxQueueSize(int maxQueueSize) { - 
this.maxQueueSize = maxQueueSize; - } - - public int getMaxBatchInterval() { - return maxBatchInterval; - } - - public void setMaxBatchInterval(int maxBatchInterval) { - this.maxBatchInterval = maxBatchInterval; - } - - public int getMaxBatchSize() { - return maxBatchSize; - } - - public void setMaxBatchSize(int maxBatchSize) { - this.maxBatchSize = maxBatchSize; - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete() - */ - @Override - public void waitToComplete() { - if (consumer != null) { - consumer.waitToComplete(-1); - } - } - - @Override - public void waitToComplete(long timeout) { - if (consumer != null) { - consumer.waitToComplete(timeout); - } - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#flush() - */ - @Override - public void flush() { - if (consumer != null) { - consumer.flush(); - } - } +import java.util.Properties; +public abstract class AuditQueue extends BaseAuditHandler { + private static final Logger LOG = LoggerFactory.getLogger(AuditQueue.class); + + public static final int AUDIT_MAX_QUEUE_SIZE_DEFAULT = 1024 * 1024; + public static final int AUDIT_BATCH_INTERVAL_DEFAULT_MS = 3000; + public static final int AUDIT_BATCH_SIZE_DEFAULT = 1000; + public static final int AUDIT_CONSUMER_THREAD_WAIT_MS = 5000; // This is the max time the consumer thread will wait before exiting the loop + public static final String PROP_QUEUE = "queue"; + public static final String PROP_BATCH_SIZE = "batch.size"; + public static final String PROP_QUEUE_SIZE = "queue.size"; + public static final String PROP_BATCH_INTERVAL = "batch.interval.ms"; + public static final String PROP_FILE_SPOOL_ENABLE = "filespool.enable"; + public static final String PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN = "filespool.drain.full.wait.ms"; + public static final String PROP_FILE_SPOOL_QUEUE_THRESHOLD = "filespool.drain.threshold.percent"; + + protected final AuditHandler consumer; + protected 
AuditFileSpool fileSpooler; + protected boolean fileSpoolerEnabled; + protected int fileSpoolMaxWaitTime = 5 * 60 * 1000; // Default 5 minutes + protected int fileSpoolDrainThresholdPercent = 80; + + // This is set when the first time stop is called. + protected long stopTime; + + boolean isConsumerDestination; + + private int maxQueueSize = AUDIT_MAX_QUEUE_SIZE_DEFAULT; + private int maxBatchInterval = AUDIT_BATCH_INTERVAL_DEFAULT_MS; + private int maxBatchSize = AUDIT_BATCH_SIZE_DEFAULT; + private boolean isDrain; + + /** + * @param consumer + */ + public AuditQueue(AuditHandler consumer) { + this.consumer = consumer; + + if (consumer instanceof BaseAuditHandler) { + ((BaseAuditHandler) consumer).setParentPath(getName()); + } + + if (consumer instanceof AuditDestination) { + // If consumer is destination, then the thread should run as server user + isConsumerDestination = true; + } + } + + @Override + public void init(Properties props, String basePropertyName) { + LOG.info("BaseAuditProvider.init()"); + + super.init(props, basePropertyName); + + setMaxBatchSize(MiscUtil.getIntProperty(props, propPrefix + "." + PROP_BATCH_SIZE, getMaxBatchSize())); + setMaxQueueSize(MiscUtil.getIntProperty(props, propPrefix + "." + PROP_QUEUE_SIZE, getMaxQueueSize())); + setMaxBatchInterval(MiscUtil.getIntProperty(props, propPrefix + "." + PROP_BATCH_INTERVAL, getMaxBatchInterval())); + + fileSpoolerEnabled = MiscUtil.getBooleanProperty(props, propPrefix + "." + PROP_FILE_SPOOL_ENABLE, false); + + String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR); + + if (fileSpoolerEnabled || logFolderProp != null) { + LOG.info("File spool is enabled for {}, logFolderProp={}, {}.{}={}", getName(), logFolderProp, propPrefix, AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR, fileSpoolerEnabled); + + fileSpoolerEnabled = true; + fileSpoolMaxWaitTime = MiscUtil.getIntProperty(props, propPrefix + "." 
+ PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN, fileSpoolMaxWaitTime); + fileSpoolDrainThresholdPercent = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILE_SPOOL_QUEUE_THRESHOLD, fileSpoolDrainThresholdPercent); + fileSpooler = new AuditFileSpool(this, consumer); + + if (!fileSpooler.init(props, basePropertyName)) { + fileSpoolerEnabled = false; + + LOG.error("Couldn't initialize file spooler. Disabling it. queue={}, consumer={}", getName(), consumer.getName()); + } + } else { + LOG.info("File spool is disabled for {}", getName()); + } + } + + @Override + public void setParentPath(String parentPath) { + super.setParentPath(parentPath); + + if (consumer instanceof BaseAuditHandler) { + ((BaseAuditHandler) consumer).setParentPath(getName()); + } + } + + @Override + public String getFinalPath() { + if (consumer != null) { + if (consumer instanceof BaseAuditHandler) { + return ((BaseAuditHandler) consumer).getFinalPath(); + } else { + return consumer.getName(); + } + } + + return getName(); + } + + @Override + public void setName(String name) { + super.setName(name); + + if (consumer instanceof BaseAuditHandler) { + ((BaseAuditHandler) consumer).setParentPath(getName()); + } + } + + public AuditHandler getConsumer() { + return consumer; + } + + public boolean isDrainMaxTimeElapsed() { + return (stopTime - System.currentTimeMillis()) > AUDIT_CONSUMER_THREAD_WAIT_MS; + } + + public boolean isDrain() { + return isDrain; + } + + public void setDrain(boolean isDrain) { + if (isDrain && stopTime != 0) { + stopTime = System.currentTimeMillis(); + } + + this.isDrain = isDrain; + } + + public int getMaxQueueSize() { + return maxQueueSize; + } + + public void setMaxQueueSize(int maxQueueSize) { + this.maxQueueSize = maxQueueSize; + } + + public int getMaxBatchInterval() { + return maxBatchInterval; + } + + public void setMaxBatchInterval(int maxBatchInterval) { + this.maxBatchInterval = maxBatchInterval; + } + + public int getMaxBatchSize() { + return maxBatchSize; + } + + 
public void setMaxBatchSize(int maxBatchSize) { + this.maxBatchSize = maxBatchSize; + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete() + */ + @Override + public void waitToComplete() { + if (consumer != null) { + consumer.waitToComplete(-1); + } + } + + @Override + public void waitToComplete(long timeout) { + if (consumer != null) { + consumer.waitToComplete(timeout); + } + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#flush() + */ + @Override + public void flush() { + if (consumer != null) { + consumer.flush(); + } + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java index cef23db6dc..8ecd6fd5be 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java @@ -19,6 +19,13 @@ package org.apache.ranger.audit.queue; +import org.apache.ranger.audit.model.AuditEventBase; +import org.apache.ranger.audit.provider.AuditHandler; +import org.apache.ranger.audit.provider.MiscUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.MDC; + import java.util.ArrayList; import java.util.Collection; import java.util.Date; @@ -28,238 +35,240 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; -import org.apache.ranger.audit.model.AuditEventBase; -import org.apache.ranger.audit.provider.AuditHandler; -import org.apache.ranger.audit.provider.MiscUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.MDC; - /** * This is a non-blocking queue with no limit on capacity. 
*/ public class AuditSummaryQueue extends AuditQueue implements Runnable { - private static final Logger logger = LoggerFactory - .getLogger(AuditSummaryQueue.class); - - public static final String PROP_SUMMARY_INTERVAL = "summary.interval.ms"; - - LinkedBlockingQueue queue = new LinkedBlockingQueue(); - Thread consumerThread = null; - - static int threadCount = 0; - static final String DEFAULT_NAME = "summary"; - - private static final int MAX_DRAIN = 100000; - - private int maxSummaryIntervalMs = 5000; - - HashMap summaryMap = new HashMap(); - - public AuditSummaryQueue(AuditHandler consumer) { - super(consumer); - setName(DEFAULT_NAME); - } - - @Override - public void init(Properties props, String propPrefix) { - super.init(props, propPrefix); - maxSummaryIntervalMs = MiscUtil.getIntProperty(props, propPrefix + "." - + PROP_SUMMARY_INTERVAL, maxSummaryIntervalMs); - logger.info("maxSummaryInterval=" + maxSummaryIntervalMs + ", name=" - + getName()); - } - - /* - * (non-Javadoc) - * - * @see - * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. - * audit.model.AuditEventBase) - */ - @Override - public boolean log(AuditEventBase event) { - // Add to the queue and return ASAP - if (queue.size() >= getMaxQueueSize()) { - return false; - } - queue.add(event); - return true; - } - - @Override - public boolean log(Collection events) { - boolean ret = true; - for (AuditEventBase event : events) { - ret = log(event); - if (!ret) { - break; - } - } - return ret; - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#start() - */ - @Override - public void start() { - if (consumer != null) { - consumer.start(); - } - - consumerThread = new Thread(this, this.getClass().getName() - + (threadCount++)); - consumerThread.setDaemon(true); - consumerThread.start(); - } - - /* - * (non-Javadoc) - * - * @see org.apache.ranger.audit.provider.AuditProvider#stop() - */ - @Override - public void stop() { - logger.info("Stop called. 
name=" + getName()); - setDrain(true); - try { - if (consumerThread != null) { - logger.info("Interrupting consumerThread. name=" + getName() - + ", consumer=" - + (consumer == null ? null : consumer.getName())); - - consumerThread.interrupt(); - } - } catch (Throwable t) { - // ignore any exception - } - consumerThread = null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Runnable#run() - */ - @Override - public void run() { - try { - //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox - MDC.clear(); - runLogAudit(); - } catch (Throwable t) { - logger.error("Exited thread without abnormaly. queue=" + getName(), - t); - } - } - - public void runLogAudit() { - - long lastDispatchTime = System.currentTimeMillis(); - - while (true) { - // Time to next dispatch - long nextDispatchDuration = lastDispatchTime - - System.currentTimeMillis() + maxSummaryIntervalMs; - - Collection eventList = new ArrayList(); - - try { - AuditEventBase event = null; - if (!isDrain() && nextDispatchDuration > 0) { - event = queue.poll(nextDispatchDuration, - TimeUnit.MILLISECONDS); - } else { - // For poll() is non blocking - event = queue.poll(); - } - - if (event != null) { - eventList.add(event); - queue.drainTo(eventList, MAX_DRAIN - 1); - } else { - // poll returned due to timeout, so reseting clock - nextDispatchDuration = lastDispatchTime - - System.currentTimeMillis() + maxSummaryIntervalMs; - lastDispatchTime = System.currentTimeMillis(); - } - } catch (InterruptedException e) { - logger.info("Caught exception in consumer thread. 
Shutdown might be in progress"); - } catch (Throwable t) { - logger.error("Caught error during processing request.", t); - } - - for (AuditEventBase event : eventList) { - // Add to hash map - String key = event.getEventKey(); - AuditSummary auditSummary = summaryMap.get(key); - if (auditSummary == null) { - auditSummary = new AuditSummary(); - auditSummary.event = event; - auditSummary.startTime = event.getEventTime(); - auditSummary.endTime = event.getEventTime(); - auditSummary.count = 1; - summaryMap.put(key, auditSummary); - } else { - auditSummary.endTime = event.getEventTime(); - auditSummary.count++; - } - } - - if (isDrain() || nextDispatchDuration <= 0) { - // Reset time just before sending the logs - lastDispatchTime = System.currentTimeMillis(); - - for (Map.Entry entry : summaryMap - .entrySet()) { - AuditSummary auditSummary = entry.getValue(); - auditSummary.event.setEventCount(auditSummary.count); - long timeDiff = auditSummary.endTime.getTime() - - auditSummary.startTime.getTime(); - timeDiff = timeDiff > 0 ? timeDiff : 1; - auditSummary.event.setEventDurationMS(timeDiff); - boolean ret = consumer.log(auditSummary.event); - if (!ret) { - // We need to drop this event - logFailedEvent(auditSummary.event); - } - } - summaryMap.clear(); - } - - if (isDrain()) { - if (summaryMap.isEmpty() && queue.isEmpty()) { - break; - } - if (isDrainMaxTimeElapsed()) { - logger.warn("Exiting polling loop because max time allowed reached. name=" - + getName() - + ", waited for " - + (stopTime - System.currentTimeMillis()) + " ms"); - } - } - - } - - logger.info("Exiting polling loop. name=" + getName()); - try { - // Call stop on the consumer - logger.info("Calling to stop consumer. name=" + getName() - + ", consumer.name=" + consumer.getName()); - consumer.stop(); - } catch (Throwable t) { - logger.error("Error while calling stop on consumer.", t); - } - logger.info("Exiting consumerThread.run() method. 
name=" + getName()); - } - - class AuditSummary { - Date startTime = null; - Date endTime = null; - int count = 0; - AuditEventBase event; - } + private static final Logger logger = LoggerFactory.getLogger(AuditSummaryQueue.class); + + public static final String PROP_SUMMARY_INTERVAL = "summary.interval.ms"; + + private static final int MAX_DRAIN = 100000; + + static final String DEFAULT_NAME = "summary"; + static int threadCount; + + Thread consumerThread; + LinkedBlockingQueue queue = new LinkedBlockingQueue<>(); + HashMap summaryMap = new HashMap<>(); + + private int maxSummaryIntervalMs = 5000; + + public AuditSummaryQueue(AuditHandler consumer) { + super(consumer); + + setName(DEFAULT_NAME); + } + + @Override + public void init(Properties props, String propPrefix) { + super.init(props, propPrefix); + + maxSummaryIntervalMs = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_SUMMARY_INTERVAL, maxSummaryIntervalMs); + + logger.info("maxSummaryInterval={}, name={}", maxSummaryIntervalMs, getName()); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger. 
+ * audit.model.AuditEventBase) + */ + @Override + public boolean log(AuditEventBase event) { + // Add to the queue and return ASAP + if (queue.size() >= getMaxQueueSize()) { + return false; + } + + queue.add(event); + + return true; + } + + @Override + public boolean log(Collection events) { + boolean ret = true; + + for (AuditEventBase event : events) { + ret = log(event); + + if (!ret) { + break; + } + } + + return ret; + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#start() + */ + @Override + public void start() { + if (consumer != null) { + consumer.start(); + } + + consumerThread = new Thread(this, this.getClass().getName() + (threadCount++)); + + consumerThread.setDaemon(true); + consumerThread.start(); + } + + /* + * (non-Javadoc) + * + * @see org.apache.ranger.audit.provider.AuditProvider#stop() + */ + @Override + public void stop() { + logger.info("Stop called. name={}", getName()); + + setDrain(true); + + try { + if (consumerThread != null) { + logger.info("Interrupting consumerThread. name={}, consumer={}", getName(), (consumer == null ? null : consumer.getName())); + + consumerThread.interrupt(); + } + } catch (Throwable t) { + // ignore any exception + } + + consumerThread = null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Runnable#run() + */ + @Override + public void run() { + try { + //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox + MDC.clear(); + runLogAudit(); + } catch (Throwable t) { + logger.error("Exited thread without abnormaly. 
queue={}", getName(), t); + } + } + + public void runLogAudit() { + long lastDispatchTime = System.currentTimeMillis(); + + while (true) { + // Time to next dispatch + long nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + maxSummaryIntervalMs; + + Collection eventList = new ArrayList<>(); + + try { + final AuditEventBase event; + + if (!isDrain() && nextDispatchDuration > 0) { + event = queue.poll(nextDispatchDuration, TimeUnit.MILLISECONDS); + } else { + // For poll() is non blocking + event = queue.poll(); + } + + if (event != null) { + eventList.add(event); + + queue.drainTo(eventList, MAX_DRAIN - 1); + } else { + // poll returned due to timeout, so reseting clock + nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + maxSummaryIntervalMs; + lastDispatchTime = System.currentTimeMillis(); + } + } catch (InterruptedException e) { + logger.info("Caught exception in consumer thread. Shutdown might be in progress"); + } catch (Throwable t) { + logger.error("Caught error during processing request.", t); + } + + for (AuditEventBase event : eventList) { + // Add to hash map + String key = event.getEventKey(); + AuditSummary auditSummary = summaryMap.get(key); + + if (auditSummary == null) { + auditSummary = new AuditSummary(); + + auditSummary.event = event; + auditSummary.startTime = event.getEventTime(); + auditSummary.endTime = event.getEventTime(); + auditSummary.count = 1; + + summaryMap.put(key, auditSummary); + } else { + auditSummary.endTime = event.getEventTime(); + + auditSummary.count++; + } + } + + if (isDrain() || nextDispatchDuration <= 0) { + // Reset time just before sending the logs + lastDispatchTime = System.currentTimeMillis(); + + for (Map.Entry entry : summaryMap.entrySet()) { + AuditSummary auditSummary = entry.getValue(); + + auditSummary.event.setEventCount(auditSummary.count); + + long timeDiff = auditSummary.endTime.getTime() - auditSummary.startTime.getTime(); + + timeDiff = timeDiff > 0 ? 
timeDiff : 1; + + auditSummary.event.setEventDurationMS(timeDiff); + + boolean ret = consumer.log(auditSummary.event); + + if (!ret) { + // We need to drop this event + logFailedEvent(auditSummary.event); + } + } + + summaryMap.clear(); + } + + if (isDrain()) { + if (summaryMap.isEmpty() && queue.isEmpty()) { + break; + } + + if (isDrainMaxTimeElapsed()) { + logger.warn("Exiting polling loop because max time allowed reached. name={}, waited for {} ms", getName(), stopTime - System.currentTimeMillis()); + } + } + } + + logger.info("Exiting polling loop. name={}", getName()); + + try { + // Call stop on the consumer + logger.info("Calling to stop consumer. name={}, consumer.name={}", getName(), consumer.getName()); + + consumer.stop(); + } catch (Throwable t) { + logger.error("Error while calling stop on consumer.", t); + } + logger.info("Exiting consumerThread.run() method. name={}", getName()); + } + + static class AuditSummary { + Date startTime; + Date endTime; + int count; + AuditEventBase event; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java b/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java index 5bc3b96c2d..b0f3d103bd 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java @@ -17,7 +17,8 @@ * under the License. 
*/ - package org.apache.ranger.audit.test; +package org.apache.ranger.audit.test; + import org.apache.ranger.audit.model.AuditEventBase; import org.apache.ranger.audit.model.AuthzAuditEvent; import org.apache.ranger.audit.model.EnumRepositoryType; @@ -32,26 +33,29 @@ import java.util.Properties; public class TestEvents { - private static final Logger LOG = LoggerFactory.getLogger(TestEvents.class); + private static final String AUDIT_PROPERTIES_FILE = "xasecure-audit.properties"; + + private TestEvents() { + // to block instantiation + } + public static void main(String[] args) { LOG.info("==> TestEvents.main()"); try { Properties auditProperties = new Properties(); - String AUDIT_PROPERTIES_FILE = "xasecure-audit.properties"; - File propFile = new File(AUDIT_PROPERTIES_FILE); - if(!propFile.exists()) { - LOG.info("Loading Audit properties file" + AUDIT_PROPERTIES_FILE); - try(FileInputStream fileInputStream = new FileInputStream(propFile)) { + if (!propFile.exists()) { + LOG.info("Loading Audit properties file {}", AUDIT_PROPERTIES_FILE); + try (FileInputStream fileInputStream = new FileInputStream(propFile)) { auditProperties.load(fileInputStream); } } else { - LOG.info("Audit properties file missing: " + AUDIT_PROPERTIES_FILE); + LOG.info("Audit properties file missing: {}", AUDIT_PROPERTIES_FILE); auditProperties.setProperty("xasecure.audit.is.enabled", "true"); auditProperties.setProperty("xasecure.audit.log4j.is.enabled", "false"); @@ -65,7 +69,7 @@ public static void main(String[] args) { AuditHandler provider = factory.getAuditProvider(); - LOG.info("provider=" + provider.toString()); + LOG.info("provider={}", provider); String strEventCount = args.length > 0 ? args[0] : auditProperties.getProperty("xasecure.audit.test.event.count"); String strEventPauseTimeInMs = args.length > 1 ? 
args[1] : auditProperties.getProperty("xasecure.audit.test.event.pause.time.ms"); @@ -75,13 +79,14 @@ public static void main(String[] args) { int eventPauseTime = (strEventPauseTimeInMs == null) ? 0 : Integer.parseInt(strEventPauseTimeInMs); int sleepTimeBeforeExit = ((strSleepTimeBeforeExit == null) ? 0 : Integer.parseInt(strSleepTimeBeforeExit)) * 1000; - for(int i = 0; i < eventCount; i++) { + for (int i = 0; i < eventCount; i++) { AuditEventBase event = getTestEvent(i); - LOG.info("==> TestEvents.main(" + (i+1) + "): adding " + event.getClass().getName()); + LOG.info("==> TestEvents.main({}): adding {}", (i + 1), event.getClass().getName()); + provider.log(event); - if(eventPauseTime > 0) { + if (eventPauseTime > 0) { Thread.sleep(eventPauseTime); } } @@ -90,18 +95,18 @@ public static void main(String[] args) { // incase of HdfsAuditProvider, logs are saved to local file system which gets sent to HDFS asynchronusly in a separate thread. // So, at this point it is possible that few local log files haven't made to HDFS. - if(sleepTimeBeforeExit > 0) { - LOG.info("waiting for " + sleepTimeBeforeExit + "ms before exiting.."); + if (sleepTimeBeforeExit > 0) { + LOG.info("waiting for {}ms before exiting..", sleepTimeBeforeExit); try { Thread.sleep(sleepTimeBeforeExit); - } catch(Exception excp) { + } catch (Exception excp) { LOG.info("error while waiting before exiting.."); } } provider.stop(); - } catch(Exception excp) { + } catch (Exception excp) { LOG.info(excp.getLocalizedMessage()); excp.printStackTrace(); } @@ -113,48 +118,48 @@ private static AuditEventBase getTestEvent(int idx) { AuthzAuditEvent event = new AuthzAuditEvent(); event.setClientIP("127.0.0.1"); - event.setAccessResult((short)(idx % 2 > 0 ? 1 : 0)); + event.setAccessResult((short) (idx % 2 > 0 ? 
1 : 0)); event.setAclEnforcer("ranger-acl"); - switch(idx % 5) { + switch (idx % 5) { case 0: event.setRepositoryName("hdfsdev"); event.setRepositoryType(EnumRepositoryType.HDFS); event.setResourcePath("/tmp/test-audit.log"); event.setResourceType("file"); event.setAccessType("read"); - if(idx % 2 > 0) { + if (idx % 2 > 0) { event.setAclEnforcer("hadoop-acl"); } - break; + break; case 1: event.setRepositoryName("hbasedev"); event.setRepositoryType(EnumRepositoryType.HBASE); event.setResourcePath("test_table/test_cf/test_col"); event.setResourceType("column"); event.setAccessType("read"); - break; + break; case 2: event.setRepositoryName("hivedev"); event.setRepositoryType(EnumRepositoryType.HIVE); event.setResourcePath("test_database/test_table/test_col"); event.setResourceType("column"); event.setAccessType("select"); - break; + break; case 3: event.setRepositoryName("knoxdev"); event.setRepositoryType(EnumRepositoryType.KNOX); event.setResourcePath("topologies/ranger-admin"); event.setResourceType("service"); event.setAccessType("get"); - break; + break; case 4: event.setRepositoryName("stormdev"); event.setRepositoryType(EnumRepositoryType.STORM); event.setResourcePath("topologies/read-finance-stream"); event.setResourceType("topology"); event.setAccessType("submit"); - break; + break; } event.setEventTime(new Date()); event.setResultReason(Integer.toString(idx)); diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractKerberosUser.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractKerberosUser.java index fd1c96e90e..47d16f31a6 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractKerberosUser.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractKerberosUser.java @@ -27,6 +27,7 @@ import javax.security.auth.kerberos.KerberosTicket; import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; + import java.security.PrivilegedAction; import 
java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; @@ -36,11 +37,9 @@ import java.util.concurrent.atomic.AtomicBoolean; public abstract class AbstractKerberosUser implements KerberosUser { - private static final Logger LOG = LoggerFactory.getLogger(AbstractKerberosUser.class); static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss'Z'"; - /** * Percentage of the ticket window to use before we renew the TGT. */ @@ -48,7 +47,7 @@ public abstract class AbstractKerberosUser implements KerberosUser { protected final AtomicBoolean loggedIn = new AtomicBoolean(false); - protected Subject subject; + protected Subject subject; protected LoginContext loginContext; public AbstractKerberosUser() { @@ -68,31 +67,32 @@ public synchronized void login() throws LoginException { try { // If it's the first time ever calling login then we need to initialize a new context if (loginContext == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Initializing new login context..."); - } + LOG.debug("Initializing new login context..."); + if (this.subject == null) { // only create a new subject if a current one does not exist // other classes may be referencing an existing subject and replacing it may break functionality of those other classes after relogin this.subject = new Subject(); } + this.loginContext = createLoginContext(subject); } loginContext.login(); loggedIn.set(true); + if (LOG.isDebugEnabled()) { - LOG.debug("Successful login for {}", new Object[]{getPrincipal()}); + LOG.debug("Successful login for {}", getPrincipal()); } } catch (LoginException le) { LoginException loginException = new LoginException("Unable to login with " + getPrincipal() + " due to: " + le.getMessage()); + loginException.setStackTrace(le.getStackTrace()); + throw loginException; } } - protected abstract LoginContext createLoginContext(final Subject subject) throws LoginException; - /** * Performs a logout of the current user. 
* @@ -107,7 +107,8 @@ public synchronized void logout() throws LoginException { try { loginContext.logout(); loggedIn.set(false); - LOG.debug("Successful logout for {}", new Object[]{getPrincipal()}); + + LOG.debug("Successful logout for {}", getPrincipal()); loginContext = null; } catch (LoginException e) { @@ -142,8 +143,7 @@ public T doAs(final PrivilegedAction action) throws IllegalStateException * @throws PrivilegedActionException if an exception is thrown from the action */ @Override - public T doAs(final PrivilegedExceptionAction action) - throws IllegalStateException, PrivilegedActionException { + public T doAs(final PrivilegedExceptionAction action) throws IllegalStateException, PrivilegedActionException { if (!isLoggedIn()) { throw new IllegalStateException("Must login before executing actions"); } @@ -159,21 +159,43 @@ public T doAs(final PrivilegedExceptionAction action) @Override public synchronized boolean checkTGTAndRelogin() throws LoginException { final KerberosTicket tgt = getTGT(); + if (tgt == null) { LOG.debug("TGT was not found"); } if (tgt != null && System.currentTimeMillis() < getRefreshTime(tgt)) { LOG.debug("TGT was found, but has not reached expiration window"); + return false; } - LOG.debug("Performing relogin for {}", new Object[]{getPrincipal()}); + LOG.debug("Performing relogin for {}", getPrincipal()); + logout(); login(); + return true; } + /** + * @return true if this user is currently logged in, false otherwise + */ + @Override + public boolean isLoggedIn() { + return loggedIn.get(); + } + + @Override + public String toString() { + return "KerberosUser{" + + "principal='" + getPrincipal() + '\'' + + ", loggedIn=" + loggedIn + + '}'; + } + + protected abstract LoginContext createLoginContext(Subject subject) throws LoginException; + /** * Get the Kerberos TGT. 
* @@ -204,8 +226,9 @@ private boolean isTGSPrincipal(final KerberosPrincipal principal) { if (principal.getName().equals("krbtgt/" + principal.getRealm() + "@" + principal.getRealm())) { if (LOG.isTraceEnabled()) { - LOG.trace("Found TGT principal: " + principal.getName()); + LOG.trace("Found TGT principal: {}", principal.getName()); } + return true; } @@ -214,33 +237,17 @@ private boolean isTGSPrincipal(final KerberosPrincipal principal) { private long getRefreshTime(final KerberosTicket tgt) { long start = tgt.getStartTime().getTime(); - long end = tgt.getEndTime().getTime(); + long end = tgt.getEndTime().getTime(); if (LOG.isTraceEnabled()) { final SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT); - final String startDate = dateFormat.format(new Date(start)); - final String endDate = dateFormat.format(new Date(end)); - LOG.trace("TGT valid starting at: " + startDate); - LOG.trace("TGT expires at: " + endDate); + final String startDate = dateFormat.format(new Date(start)); + final String endDate = dateFormat.format(new Date(end)); + + LOG.trace("TGT valid starting at: {}", startDate); + LOG.trace("TGT expires at: {}", endDate); } return start + (long) ((end - start) * TICKET_RENEW_WINDOW); } - - /** - * @return true if this user is currently logged in, false otherwise - */ - @Override - public boolean isLoggedIn() { - return loggedIn.get(); - } - - @Override - public String toString() { - return "KerberosUser{" + - "principal='" + getPrincipal() + '\'' + - ", loggedIn=" + loggedIn + - '}'; - } } - diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractRangerAuditWriter.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractRangerAuditWriter.java index 0e74e3bd4b..dca00ee978 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractRangerAuditWriter.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractRangerAuditWriter.java @@ -21,7 +21,12 @@ import 
org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.CommonPathCapabilities; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StreamCapabilities; import org.apache.ranger.audit.provider.MiscUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,129 +45,171 @@ public abstract class AbstractRangerAuditWriter implements RangerAuditWriter { private static final Logger logger = LoggerFactory.getLogger(AbstractRangerAuditWriter.class); - public static final String PROP_FILESYSTEM_DIR = "dir"; - public static final String PROP_FILESYSTEM_SUBDIR = "subdir"; - public static final String PROP_FILESYSTEM_FILE_NAME_FORMAT = "filename.format"; - public static final String PROP_FILESYSTEM_FILE_ROLLOVER = "file.rollover.sec"; - public static final String PROP_FILESYSTEM_ROLLOVER_PERIOD = "file.rollover.period"; - public static final String PROP_FILESYSTEM_FILE_EXTENSION = ".log"; - public Configuration conf = null; - public FileSystem fileSystem = null; - public Map auditConfigs = null; - public Path auditPath = null; - public PrintWriter logWriter = null; - public RollingTimeUtil rollingTimeUtil = null; - public String auditProviderName = null; - public String fullPath = null; - public String parentFolder = null; - public String currentFileName = null; - public String logFileNameFormat = null; - public String logFolder = null; - public String fileExtension = null; - public String rolloverPeriod = null; - public String fileSystemScheme = null; - public Date nextRollOverTime = null; - public int fileRolloverSec = 24 * 60 * 60; // In seconds - public boolean rollOverByDuration = false; - public volatile FSDataOutputStream ostream = null; // output stream wrapped in logWriter - private boolean isHFlushCapableStream = false; - protected 
boolean reUseLastLogFile = false; + public static final String PROP_FILESYSTEM_DIR = "dir"; + public static final String PROP_FILESYSTEM_SUBDIR = "subdir"; + public static final String PROP_FILESYSTEM_FILE_NAME_FORMAT = "filename.format"; + public static final String PROP_FILESYSTEM_FILE_ROLLOVER = "file.rollover.sec"; + public static final String PROP_FILESYSTEM_ROLLOVER_PERIOD = "file.rollover.period"; + public static final String PROP_FILESYSTEM_FILE_EXTENSION = ".log"; + + public Configuration conf; + public FileSystem fileSystem; + public Map auditConfigs; + public Path auditPath; + public PrintWriter logWriter; + public RollingTimeUtil rollingTimeUtil; + public String auditProviderName; + public String fullPath; + public String parentFolder; + public String currentFileName; + public String logFileNameFormat; + public String logFolder; + public String fileExtension; + public String rolloverPeriod; + public String fileSystemScheme; + public Date nextRollOverTime; + public int fileRolloverSec = 24 * 60 * 60; // In seconds + public boolean rollOverByDuration; + + public volatile FSDataOutputStream ostream; // output stream wrapped in logWriter + + protected boolean reUseLastLogFile; + private boolean isHFlushCapableStream; @Override - public void init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) { + public void init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) { // Initialize properties for this class // Initial folder and file properties logger.info("==> AbstractRangerAuditWriter.init()"); + this.auditProviderName = auditProviderName; - this.auditConfigs = auditConfigs; + this.auditConfigs = auditConfigs; - init(props,propPrefix); + init(props, propPrefix); logger.info("<== AbstractRangerAuditWriter.init()"); } - public void createFileSystemFolders() throws Exception { + @Override + public void flush() { + logger.debug("==> AbstractRangerAuditWriter.flush() {}", fileSystemScheme); - if 
(logger.isDebugEnabled()) { - logger.debug("==> AbstractRangerAuditWriter.createFileSystemFolders()"); + if (ostream != null) { + try { + synchronized (this) { + if (ostream != null) { + // 1) PrinterWriter does not have bufferring of its own so + // we need to flush its underlying stream + // 2) HDFS flush() does not really flush all the way to disk. + if (isHFlushCapableStream) { + //Checking HFLUSH capability of the stream because of HADOOP-13327. + //For S3 filesysttem, hflush throws UnsupportedOperationException and hence we call flush. + ostream.hflush(); + } else { + ostream.flush(); + } + } + + logger.debug("Flush {} audit logs completed.....", fileSystemScheme); + } + } catch (IOException e) { + logger.error("Error on flushing log writer: {}\nException will be ignored. name={}, fileName={}", e.getMessage(), auditProviderName, currentFileName); + } } + + logger.debug("<== AbstractRangerAuditWriter.flush()"); + } + + public void createFileSystemFolders() throws Exception { + logger.debug("==> AbstractRangerAuditWriter.createFileSystemFolders()"); + // Create a new file - Date currentTime = new Date(); - String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime()); - parentFolder = MiscUtil.replaceTokens(logFolder, currentTime.getTime()); - fullPath = parentFolder + Path.SEPARATOR + fileName; - String defaultPath = fullPath; - conf = createConfiguration(); - URI uri = URI.create(fullPath); - fileSystem = FileSystem.get(uri, conf); - auditPath = new Path(fullPath); - fileSystemScheme = getFileSystemScheme(); - logger.info("Checking whether log file exists. 
"+ fileSystemScheme + "Path= " + fullPath + ", UGI=" + MiscUtil.getUGILoginUser()); + Date currentTime = new Date(); + String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime()); + + parentFolder = MiscUtil.replaceTokens(logFolder, currentTime.getTime()); + fullPath = parentFolder + Path.SEPARATOR + fileName; + + String defaultPath = fullPath; + + conf = createConfiguration(); + + URI uri = URI.create(fullPath); + + fileSystem = FileSystem.get(uri, conf); + auditPath = new Path(fullPath); + fileSystemScheme = getFileSystemScheme(); + + logger.info("Checking whether log file exists. {} Path={}, UGI={}", fileSystemScheme, fullPath, MiscUtil.getUGILoginUser()); + int i = 0; + while (fileSystem.exists(auditPath)) { i++; + int lastDot = defaultPath.lastIndexOf('.'); String baseName = defaultPath.substring(0, lastDot); String extension = defaultPath.substring(lastDot); - fullPath = baseName + "." + i + extension; - auditPath = new Path(fullPath); - logger.info("Checking whether log file exists. "+ fileSystemScheme + "Path= " + fullPath); + + fullPath = baseName + "." + i + extension; + auditPath = new Path(fullPath); + + logger.info("Checking whether log file exists. {} Path={}", fileSystemScheme, fullPath); } - logger.info("Log file doesn't exists. Will create and use it. "+ fileSystemScheme + "Path= " + fullPath); + + logger.info("Log file doesn't exists. Will create and use it. 
{} Path={}", fileSystemScheme, fullPath); // Create parent folders createParents(auditPath, fileSystem); currentFileName = fullPath; - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.createFileSystemFolders()"); - } + logger.debug("<== AbstractRangerAuditWriter.createFileSystemFolders()"); } - public Configuration createConfiguration() { + public Configuration createConfiguration() { Configuration conf = new Configuration(); + for (Map.Entry entry : auditConfigs.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); + // for ease of install config file may contain properties with empty value, skip those if (StringUtils.isNotEmpty(value)) { conf.set(key, value); } - logger.info("Adding property to "+ fileSystemScheme + " + config: " + key + " => " + value); + + logger.info("Adding property to {} + config: {} => {}", fileSystemScheme, key, value); } - logger.info("Returning " + fileSystemScheme + "Filesystem Config: " + conf.toString()); + logger.info("Returning {} Filesystem Config: {}", fileSystemScheme, conf); + return conf; } - public void createParents(Path pathLogfile, FileSystem fileSystem) - throws Exception { - logger.info("Creating parent folder for " + pathLogfile); + public void createParents(Path pathLogfile, FileSystem fileSystem) throws Exception { + logger.info("Creating parent folder for {}", pathLogfile); + Path parentPath = pathLogfile != null ? pathLogfile.getParent() : null; - if (parentPath != null && fileSystem != null - && !fileSystem.exists(parentPath)) { + if (parentPath != null && fileSystem != null && !fileSystem.exists(parentPath)) { fileSystem.mkdirs(parentPath); } } public void init(Properties props, String propPrefix) { + logger.debug("==> AbstractRangerAuditWriter.init()"); - if (logger.isDebugEnabled()) { - logger.debug("==> AbstractRangerAuditWriter.init()"); - } + String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." 
+ PROP_FILESYSTEM_DIR); - String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILESYSTEM_DIR); if (StringUtils.isEmpty(logFolderProp)) { - logger.error("File destination folder is not configured. Please set " - + propPrefix + "." - + PROP_FILESYSTEM_DIR + ". name=" - + auditProviderName); + logger.error("File destination folder is not configured. Please set {}.{}. name={}", propPrefix, PROP_FILESYSTEM_DIR, auditProviderName); + return; } String logSubFolder = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILESYSTEM_SUBDIR); + if (StringUtils.isEmpty(logSubFolder)) { logSubFolder = "%app-type%/%time:yyyyMMdd%"; } @@ -180,9 +227,9 @@ public void init(Properties props, String propPrefix) { logFolder = logFolderProp + "/" + logSubFolder; - logger.info("logFolder=" + logFolder + ", destName=" + auditProviderName); - logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="+ auditProviderName); - logger.info("config=" + auditConfigs.toString()); + logger.info("logFolder={}, destName={}", logFolder, auditProviderName); + logger.info("logFileNameFormat={}, destName={}", logFileNameFormat, auditProviderName); + logger.info("config={}", auditConfigs); rolloverPeriod = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILESYSTEM_ROLLOVER_PERIOD); rollingTimeUtil = RollingTimeUtil.getInstance(); @@ -190,53 +237,53 @@ public void init(Properties props, String propPrefix) { //file.rollover.period is used for rolling over. If it could compute the next roll over time using file.rollover.period //it fall back to use file.rollover.sec for find next rollover time. If still couldn't find default will be 1day window //for rollover. 
- if(StringUtils.isEmpty(rolloverPeriod) ) { + if (StringUtils.isEmpty(rolloverPeriod)) { rolloverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(fileRolloverSec); } try { nextRollOverTime = rollingTimeUtil.computeNextRollingTime(rolloverPeriod); - } catch ( Exception e) { - logger.warn("Rollover by file.rollover.period failed...will be using the file.rollover.sec for "+ fileSystemScheme + " audit file rollover...", e); + } catch (Exception e) { + logger.warn("Rollover by file.rollover.period failed...will be using the file.rollover.sec for {} audit file rollover...", fileSystemScheme, e); + rollOverByDuration = true; nextRollOverTime = rollOverByDuration(); } - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.init()"); - } - + logger.debug("<== AbstractRangerAuditWriter.init()"); } public void closeFileIfNeeded() { - if (logger.isDebugEnabled()) { - logger.debug("==> AbstractRangerAuditWriter.closeFileIfNeeded()"); - } + logger.debug("==> AbstractRangerAuditWriter.closeFileIfNeeded()"); if (logWriter == null) { - if (logger.isDebugEnabled()){ - logger.debug("Log writer is null, aborting rollover condition check!"); - } + logger.debug("Log writer is null, aborting rollover condition check!"); + return; } - if ( System.currentTimeMillis() >= nextRollOverTime.getTime() ) { + if (System.currentTimeMillis() >= nextRollOverTime.getTime()) { logger.info("Closing file. Rolling over. 
name = {}, fileName = {}", auditProviderName, currentFileName); + logWriter.flush(); + closeWriter(); resetWriter(); - currentFileName = null; + + currentFileName = null; reUseLastLogFile = false; if (!rollOverByDuration) { try { - if(StringUtils.isEmpty(rolloverPeriod) ) { + if (StringUtils.isEmpty(rolloverPeriod)) { rolloverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(fileRolloverSec); } + nextRollOverTime = rollingTimeUtil.computeNextRollingTime(rolloverPeriod); - } catch ( Exception e) { + } catch (Exception e) { logger.warn("Rollover by file.rollover.period failed", e); logger.warn("Using the file.rollover.sec for {} audit file rollover...", fileSystemScheme); + nextRollOverTime = rollOverByDuration(); } } else { @@ -244,48 +291,47 @@ public void closeFileIfNeeded() { } } - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.closeFileIfNeeded()"); - } + logger.debug("<== AbstractRangerAuditWriter.closeFileIfNeeded()"); } - public Date rollOverByDuration() { - long rollOverTime = rollingTimeUtil.computeNextRollingTime(fileRolloverSec,nextRollOverTime); + public Date rollOverByDuration() { + long rollOverTime = rollingTimeUtil.computeNextRollingTime(fileRolloverSec, nextRollOverTime); + return new Date(rollOverTime); } public PrintWriter createWriter() throws Exception { - if (logger.isDebugEnabled()) { - logger.debug("==> AbstractRangerAuditWriter.createWriter()"); - } + logger.debug("==> AbstractRangerAuditWriter.createWriter()"); if (logWriter == null) { boolean appendMode = false; + // if append is supported, reuse last log file if (reUseLastLogFile && fileSystem.hasPathCapability(auditPath, CommonPathCapabilities.FS_APPEND)) { logger.info("Appending to last log file. 
auditPath = {}", fullPath); + try { - ostream = fileSystem.append(auditPath); + ostream = fileSystem.append(auditPath); appendMode = true; - } catch (Exception e){ + } catch (Exception e) { logger.error("Failed to append to file {} due to {}", fullPath, e.getMessage()); logger.info("Falling back to create a new log file!"); - appendMode = false; } } + if (!appendMode) { // Create the file to write logger.info("Creating new log file. auditPath = {}", fullPath); + createFileSystemFolders(); + ostream = fileSystem.create(auditPath); } logWriter = new PrintWriter(ostream); isHFlushCapableStream = ostream.hasCapability(StreamCapabilities.HFLUSH); } - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.createWriter()"); - } + logger.debug("<== AbstractRangerAuditWriter.createWriter()"); return logWriter; } @@ -294,9 +340,7 @@ public PrintWriter createWriter() throws Exception { * Closes the writer after writing audits **/ public void closeWriter() { - if (logger.isDebugEnabled()) { - logger.debug("==> AbstractRangerAuditWriter.closeWriter()"); - } + logger.debug("==> AbstractRangerAuditWriter.closeWriter()"); if (ostream != null) { try { @@ -305,88 +349,49 @@ public void closeWriter() { logger.error("Error closing the stream {}", e.getMessage()); } } - if (logWriter != null) - logWriter.close(); - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.closeWriter()"); + if (logWriter != null) { + logWriter.close(); } + + logger.debug("<== AbstractRangerAuditWriter.closeWriter()"); } public void resetWriter() { - if (logger.isDebugEnabled()) { - logger.debug("==> AbstractRangerAuditWriter.resetWriter()"); - } + logger.debug("==> AbstractRangerAuditWriter.resetWriter()"); logWriter = null; - ostream = null; + ostream = null; - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.resetWriter()"); - } - } - - @Override - public void flush() { - if (logger.isDebugEnabled()) { - logger.debug("==> 
AbstractRangerAuditWriter.flush() " + fileSystemScheme); - } - if (ostream != null) { - try { - synchronized (this) { - if (ostream != null) - // 1) PrinterWriter does not have bufferring of its own so - // we need to flush its underlying stream - // 2) HDFS flush() does not really flush all the way to disk. - if (isHFlushCapableStream) { - //Checking HFLUSH capability of the stream because of HADOOP-13327. - //For S3 filesysttem, hflush throws UnsupportedOperationException and hence we call flush. - ostream.hflush(); - } else { - ostream.flush(); - } - if (logger.isDebugEnabled()) { - logger.debug("Flush " + fileSystemScheme + " audit logs completed....."); - } - } - } catch (IOException e) { - logger.error("Error on flushing log writer: " + e.getMessage() + - "\nException will be ignored. name=" + auditProviderName + ", fileName=" + currentFileName); - } - } - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.flush()"); - } + logger.debug("<== AbstractRangerAuditWriter.resetWriter()"); } public boolean logFileToHDFS(File file) throws Exception { + logger.debug("==> AbstractRangerAuditWriter.logFileToHDFS()"); + boolean ret = false; - if (logger.isDebugEnabled()) { - logger.debug("==> AbstractRangerAuditWriter.logFileToHDFS()"); - } if (logWriter == null) { // Create the file to write createFileSystemFolders(); - logger.info("Copying the Audit File" + file.getName() + " to HDFS Path" + fullPath); + + logger.info("Copying the Audit File {} to HDFS Path {}", file.getName(), fullPath); + Path destPath = new Path(fullPath); - ret = FileUtil.copy(file,fileSystem,destPath,false,conf); - } - if (logger.isDebugEnabled()) { - logger.debug("<== AbstractRangerAuditWriter.logFileToHDFS()"); + ret = FileUtil.copy(file, fileSystem, destPath, false, conf); } + + logger.debug("<== AbstractRangerAuditWriter.logFileToHDFS()"); + return ret; } public String getFileSystemScheme() { - String ret = null; - ret = logFolder.substring(0, 
(logFolder.indexOf(":"))); - ret = ret.toUpperCase(); - return ret; + return logFolder.substring(0, (logFolder.indexOf(":"))).toUpperCase(); } public void setFileExtension(String fileExtension) { - this.fileExtension = fileExtension; + this.fileExtension = fileExtension; } -} \ No newline at end of file +} diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java index 2b59ab6802..52a9af7bf9 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -23,10 +23,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.security.auth.login.AppConfigurationEntry; +import javax.security.auth.login.Configuration; + import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.file.Files; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -36,9 +39,6 @@ import java.util.StringTokenizer; import java.util.TreeSet; -import javax.security.auth.login.AppConfigurationEntry; -import javax.security.auth.login.Configuration; - /** * InMemoryJAASConfiguration * @@ -54,7 +54,6 @@ * xasecure.audit.jaas.KafkaClient.option.serviceName = kafka * xasecure.audit.jaas.KafkaClient.option.keyTab = /etc/security/keytabs/kafka_client.keytab * xasecure.audit.jaas.KafkaClient.option.principal = kafka-client-1@EXAMPLE.COM - * xasecure.audit.jaas.MyClient.0.loginModuleName = com.sun.security.auth.module.Krb5LoginModule * xasecure.audit.jaas.MyClient.0.loginModuleControlFlag = required * xasecure.audit.jaas.MyClient.0.option.useKeyTab = true @@ -70,7 +69,6 @@ * xasecure.audit.jaas.MyClient.1.option.serviceName = kafka * xasecure.audit.jaas.MyClient.1.option.keyTab = /etc/security/keytabs/kafka_client.keytab * xasecure.audit.jaas.MyClient.1.option.principal = kafka-client-1@EXAMPLE.COM - * This will set the JAAS configuration - equivalent to the jaas.conf file entries: * KafkaClient { * com.sun.security.auth.module.Krb5LoginModule required @@ -115,7 +113,6 @@ */ public final class InMemoryJAASConfiguration extends Configuration { - private static final Logger LOG = LoggerFactory.getLogger(InMemoryJAASConfiguration.class); public static final String JAAS_CONFIG_PREFIX_PARAM = "xasecure.audit.jaas."; @@ -127,10 +124,16 @@ 
public final class InMemoryJAASConfiguration extends Configuration { private final Configuration parent; private final Map> applicationConfigEntryMap = new HashMap<>(); + private InMemoryJAASConfiguration(Properties prop) { + parent = Configuration.getConfiguration(); + + initialize(prop); + } + public static InMemoryJAASConfiguration init(String propFile) throws Exception { - LOG.debug("==> InMemoryJAASConfiguration.init( {} ) ", propFile); + LOG.debug("==> InMemoryJAASConfiguration.init( {} ) ", propFile); - InMemoryJAASConfiguration ret = null; + InMemoryJAASConfiguration ret; InputStream in = null; try { @@ -142,8 +145,9 @@ public static InMemoryJAASConfiguration init(String propFile) throws Exception { if (!propFile.startsWith("/")) { in = ClassLoader.getSystemResourceAsStream("/" + propFile); } + if (in == null) { - in = new FileInputStream(new File(propFile)); + in = Files.newInputStream(new File(propFile).toPath()); } } @@ -153,26 +157,26 @@ public static InMemoryJAASConfiguration init(String propFile) throws Exception { } catch (IOException e) { throw new Exception("Failed to load JAAS application properties", e); } finally { - if ( in != null) { - try { - in.close(); - } catch ( Exception e) { - //Ignore - } - } + if (in != null) { + try { + in.close(); + } catch (Exception e) { + //Ignore + } + } } LOG.debug("<== InMemoryJAASConfiguration.init( {} ) ", propFile); - return ret; + return ret; } public static InMemoryJAASConfiguration init(Properties properties) throws Exception { - LOG.debug("==> InMemoryJAASConfiguration.init()"); + LOG.debug("==> InMemoryJAASConfiguration.init()"); - InMemoryJAASConfiguration ret = null; + InMemoryJAASConfiguration ret; - if (properties != null && MapUtils.isNotEmpty(properties)) { + if (MapUtils.isNotEmpty(properties)) { ret = new InMemoryJAASConfiguration(properties); } else { throw new Exception("Failed to load JAAS application properties: properties NULL or empty!"); @@ -180,7 +184,7 @@ public static 
InMemoryJAASConfiguration init(Properties properties) throws Excep LOG.debug("<== InMemoryJAASConfiguration.init()"); - return ret; + return ret; } @Override @@ -196,7 +200,7 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) { if (ret == null || ret.length == 0) { List retList = applicationConfigEntryMap.get(name); - if (retList != null && retList.size() > 0) { + if (retList != null && !retList.isEmpty()) { ret = retList.toArray(new AppConfigurationEntry[retList.size()]); } } @@ -208,19 +212,13 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) { return ret; } - private InMemoryJAASConfiguration(Properties prop) { - parent = Configuration.getConfiguration(); - - initialize(prop); - } - private void initialize(Properties properties) { - LOG.debug("==> InMemoryJAASConfiguration.initialize()"); + LOG.debug("==> InMemoryJAASConfiguration.initialize()"); int prefixLen = JAAS_CONFIG_PREFIX_PARAM.length(); Map> jaasClients = new HashMap<>(); - for(String key : properties.stringPropertyNames()) { + for (String key : properties.stringPropertyNames()) { if (key.startsWith(JAAS_CONFIG_PREFIX_PARAM)) { String jaasKey = key.substring(prefixLen); StringTokenizer tokenizer = new StringTokenizer(jaasKey, "."); @@ -228,17 +226,9 @@ private void initialize(Properties properties) { if (tokenCount > 0) { String clientId = tokenizer.nextToken(); - SortedSet indexList = jaasClients.get(clientId); - - if (indexList == null) { - indexList = new TreeSet<>(); - - jaasClients.put(clientId, indexList); - } - - String indexStr = tokenizer.nextToken(); - int indexId = isNumeric(indexStr) ? Integer.parseInt(indexStr) : -1; - Integer clientIdIndex = Integer.valueOf(indexId); + SortedSet indexList = jaasClients.computeIfAbsent(clientId, k -> new TreeSet<>()); + String indexStr = tokenizer.nextToken(); + Integer clientIdIndex = isNumeric(indexStr) ? 
Integer.parseInt(indexStr) : -1; if (!indexList.contains(clientIdIndex)) { indexList.add(clientIdIndex); @@ -247,21 +237,20 @@ private void initialize(Properties properties) { } } - for(String jaasClient : jaasClients.keySet()) { - for(Integer index : jaasClients.get(jaasClient)) { + for (String jaasClient : jaasClients.keySet()) { + for (Integer index : jaasClients.get(jaasClient)) { String keyPrefix = JAAS_CONFIG_PREFIX_PARAM + jaasClient + "."; if (index > -1) { - keyPrefix = keyPrefix + String.valueOf(index) + "."; + keyPrefix = keyPrefix + index + "."; } String keyParam = keyPrefix + JAAS_CONFIG_LOGIN_MODULE_NAME_PARAM; String loginModuleName = properties.getProperty(keyParam); if (loginModuleName == null) { - LOG.error("Unable to add JAAS configuration for " - + "client [" + jaasClient + "] as it is missing param [" + keyParam + "]." - + " Skipping JAAS config for [" + jaasClient + "]"); + LOG.error("Unable to add JAAS configuration for client [{}] as it is missing param [{}]. Skipping JAAS config for [{}]", jaasClient, keyParam, jaasClient); + continue; } else { loginModuleName = loginModuleName.trim(); @@ -271,29 +260,35 @@ private void initialize(Properties properties) { String controlFlag = properties.getProperty(keyParam); - AppConfigurationEntry.LoginModuleControlFlag loginControlFlag = null; + AppConfigurationEntry.LoginModuleControlFlag loginControlFlag; if (controlFlag != null) { controlFlag = controlFlag.trim().toLowerCase(); - if (controlFlag.equals("optional")) { - loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL; - } else if (controlFlag.equals("requisite")) { - loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUISITE; - } else if (controlFlag.equals("sufficient")) { - loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT; - } else if (controlFlag.equals("required")) { - loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED; - } else { - String validValues = 
"optional|requisite|sufficient|required"; - LOG.warn("Unknown JAAS configuration value for (" + keyParam - + ") = [" + controlFlag + "], valid value are [" + validValues - + "] using the default value, REQUIRED"); - loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED; + switch (controlFlag) { + case "optional": + loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL; + break; + case "requisite": + loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUISITE; + break; + case "sufficient": + loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT; + break; + case "required": + loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED; + break; + default: + String validValues = "optional|requisite|sufficient|required"; + + LOG.warn("Unknown JAAS configuration value for ({}) = [{}], valid value are [{}] using the default value, REQUIRED", keyParam, controlFlag, validValues); + + loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED; + break; } } else { - LOG.warn("Unable to find JAAS configuration (" - + keyParam + "); using the default value, REQUIRED"); + LOG.warn("Unable to find JAAS configuration ({}); using the default value, REQUIRED", keyParam); + loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED; } @@ -301,7 +296,7 @@ private void initialize(Properties properties) { String optionPrefix = keyPrefix + JAAS_CONFIG_LOGIN_OPTIONS_PREFIX + "."; int optionPrefixLen = optionPrefix.length(); - for(String key : properties.stringPropertyNames()) { + for (String key : properties.stringPropertyNames()) { if (key.startsWith(optionPrefix)) { String optionKey = key.substring(optionPrefixLen); String optionVal = properties.getProperty(key); @@ -314,8 +309,7 @@ private void initialize(Properties properties) { optionVal = SecurityUtil.getServerPrincipal(optionVal, (String) null); } } catch (IOException e) { - LOG.warn("Failed to build serverPrincipal. 
Using provided value:[" - + optionVal + "]"); + LOG.warn("Failed to build serverPrincipal. Using provided value:[{}]", optionVal); } } @@ -341,13 +335,7 @@ private void initialize(Properties properties) { LOG.debug(sb.toString()); } - List retList = applicationConfigEntryMap.get(jaasClient); - - if (retList == null) { - retList = new ArrayList<>(); - - applicationConfigEntryMap.put(jaasClient, retList); - } + List retList = applicationConfigEntryMap.computeIfAbsent(jaasClient, k -> new ArrayList<>()); retList.add(entry); } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosAction.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosAction.java index 1bbbca8d1e..4754b15d1f 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosAction.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosAction.java @@ -23,6 +23,7 @@ import org.slf4j.Logger; import javax.security.auth.login.LoginException; + import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; @@ -30,17 +31,15 @@ * Helper class for processors to perform an action as a KerberosUser. 
*/ public class KerberosAction { - - private final KerberosUser kerberosUser; + private final KerberosUser kerberosUser; private final PrivilegedExceptionAction action; - private final Logger logger; + private final Logger logger; - public KerberosAction(final KerberosUser kerberosUser, - final PrivilegedExceptionAction action, - final Logger logger) { + public KerberosAction(final KerberosUser kerberosUser, final PrivilegedExceptionAction action, final Logger logger) { this.kerberosUser = kerberosUser; - this.action = action; - this.logger = logger; + this.action = action; + this.logger = logger; + Validate.notNull(this.kerberosUser); Validate.notNull(this.action); Validate.notNull(this.logger); @@ -48,11 +47,13 @@ public KerberosAction(final KerberosUser kerberosUser, public T execute() throws Exception { T result; + // lazily login the first time the processor executes if (!kerberosUser.isLoggedIn()) { try { kerberosUser.login(); - logger.info("Successful login for " + kerberosUser.getPrincipal()); + + logger.info("Successful login for {}", kerberosUser.getPrincipal()); } catch (LoginException e) { throw new Exception("Login failed due to: " + e.getMessage(), e); } @@ -75,6 +76,7 @@ public T execute() throws Exception { try { kerberosUser.logout(); kerberosUser.login(); + result = kerberosUser.doAs(action); } catch (Exception e) { throw new Exception("Retrying privileged action failed due to: " + e.getMessage(), e); diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosJAASConfigUser.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosJAASConfigUser.java index 2667721609..149364d4b9 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosJAASConfigUser.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosJAASConfigUser.java @@ -30,8 +30,7 @@ /** * Used to authenticate and execute actions when Kerberos is enabled and a keytab is being used. 
- * - * */ + */ public class KerberosJAASConfigUser extends AbstractKerberosUser { private static final Logger LOG = LoggerFactory.getLogger(KerberosJAASConfigUser.class); @@ -43,20 +42,19 @@ public KerberosJAASConfigUser(final String configName, final Configuration confi this.config = config; } - @Override public String getPrincipal() { String ret = null; AppConfigurationEntry[] entries = config.getAppConfigurationEntry(configName); if (entries != null) { - for (AppConfigurationEntry entry : entries) { - if (entry.getOptions().containsKey(InMemoryJAASConfiguration.JAAS_PRINCIPAL_PROP)) { - ret = (String) entry.getOptions().get(InMemoryJAASConfiguration.JAAS_PRINCIPAL_PROP); + for (AppConfigurationEntry entry : entries) { + if (entry.getOptions().containsKey(InMemoryJAASConfiguration.JAAS_PRINCIPAL_PROP)) { + ret = (String) entry.getOptions().get(InMemoryJAASConfiguration.JAAS_PRINCIPAL_PROP); - break; - } - } + break; + } + } } return ret; @@ -64,15 +62,10 @@ public String getPrincipal() { @Override protected LoginContext createLoginContext(Subject subject) throws LoginException { - if (LOG.isDebugEnabled()) { - LOG.debug("==> KerberosJAASConfigUser.createLoginContext()"); - } + LOG.debug("==> KerberosJAASConfigUser.createLoginContext()"); - if (LOG.isDebugEnabled()) { - LOG.debug("<== KerberosJAASConfigUser.createLoginContext(), Subject: " + subject); - } + LOG.debug("<== KerberosJAASConfigUser.createLoginContext(), Subject: {}", subject); return new LoginContext(configName, subject, null, config); } } - diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosUser.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosUser.java index fb6003e2fd..2357d9da04 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosUser.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosUser.java @@ -20,6 +20,7 @@ package org.apache.ranger.audit.utils; import 
javax.security.auth.login.LoginException; + import java.security.PrivilegedAction; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; @@ -28,7 +29,6 @@ * A keytab-based user that can login/logout and perform actions as the given user. */ public interface KerberosUser { - /** * Performs a login for the given user. * @@ -62,8 +62,7 @@ public interface KerberosUser { * @throws IllegalStateException if attempting to execute an action before performing a login * @throws PrivilegedActionException if the action itself threw an exception */ - T doAs(PrivilegedExceptionAction action) - throws IllegalStateException, PrivilegedActionException; + T doAs(PrivilegedExceptionAction action) throws IllegalStateException, PrivilegedActionException; /** * Performs a re-login if the TGT is close to expiration. @@ -82,6 +81,4 @@ T doAs(PrivilegedExceptionAction action) * @return the principal for this user */ String getPrincipal(); - } - diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/ORCFileUtil.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/ORCFileUtil.java index c2bee8aad6..972bf89966 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/ORCFileUtil.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/ORCFileUtil.java @@ -23,7 +23,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.exec.vector.*; +import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.orc.CompressionKind; import org.apache.orc.OrcFile; import 
org.apache.orc.OrcFile.WriterOptions; @@ -35,63 +40,106 @@ import org.slf4j.LoggerFactory; import java.lang.reflect.Field; +import java.text.Format; +import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Date; -import java.util.Map; import java.util.HashMap; -import java.text.Format; -import java.text.SimpleDateFormat; +import java.util.Map; public class ORCFileUtil { - private static final Logger logger = LoggerFactory.getLogger(ORCFileUtil.class); - private static volatile ORCFileUtil me = null; + private static volatile ORCFileUtil me; + protected CompressionKind defaultCompression = CompressionKind.SNAPPY; protected CompressionKind compressionKind = CompressionKind.NONE; - protected TypeDescription schema = null; - protected VectorizedRowBatch batch = null; - protected String auditSchema = null; + protected TypeDescription schema; + protected VectorizedRowBatch batch; + protected String auditSchema; protected String dateFormat = "yyyy-MM-dd HH:mm:ss"; protected ArrayList schemaFields = new ArrayList<>(); - protected Map vectorizedRowBatchMap = new HashMap<>(); + protected Map vectorizedRowBatchMap = new HashMap<>(); protected int orcBufferSize; protected long orcStripeSize; public static ORCFileUtil getInstance() { ORCFileUtil orcFileUtil = me; + if (orcFileUtil == null) { synchronized (ORCFileUtil.class) { orcFileUtil = me; + if (orcFileUtil == null) { - me = orcFileUtil = new ORCFileUtil(); + orcFileUtil = new ORCFileUtil(); + me = orcFileUtil; } } } + return orcFileUtil; } - public void init(int orcBufferSize, long orcStripeSize, String compression) throws Exception{ - if (logger.isDebugEnabled()) { - logger.debug("==> ORCFileUtil.init()"); + public static void main(String[] args) throws Exception { + ORCFileUtil auditOrcFileUtil = new ORCFileUtil(); + + auditOrcFileUtil.init(10000, 100000L, "snappy"); + + try { + Configuration conf = new Configuration(); + FileSystem fs = FileSystem.get(conf); + Writer 
write = auditOrcFileUtil.createWriter(conf, fs, "/tmp/test.orc"); + Collection events = getTestEvent(); + + auditOrcFileUtil.log(write, events); + + write.close(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + protected static Collection getTestEvent() { + Collection events = new ArrayList<>(); + + for (int idx = 0; idx < 20; idx++) { + AuthzAuditEvent event = new AuthzAuditEvent(); + + event.setEventId(Integer.toString(idx)); + event.setClientIP("127.0.0.1"); + event.setAccessResult((short) 1); + event.setAclEnforcer("ranger-acl"); + event.setRepositoryName("hdfsdev"); + event.setRepositoryType(EnumRepositoryType.HDFS); + event.setResourcePath("/tmp/test-audit.log" + idx + idx + 1); + event.setResourceType("file"); + event.setAccessType("read"); + event.setEventTime(new Date()); + event.setResultReason(Integer.toString(1)); + + events.add(event); } + + return events; + } + + public void init(int orcBufferSize, long orcStripeSize, String compression) throws Exception { + logger.debug("==> ORCFileUtil.init()"); + this.orcBufferSize = orcBufferSize; this.orcStripeSize = orcStripeSize; this.compressionKind = getORCCompression(compression); + initORCAuditSchema(); - if (logger.isDebugEnabled()) { - logger.debug("<== ORCFileUtil.init() : orcBufferSize: " + orcBufferSize + " stripeSize: " + orcStripeSize + - " compression: " + compression); - } + + logger.debug("<== ORCFileUtil.init() : orcBufferSize: {} stripeSize: {} compression: {}", orcBufferSize, orcStripeSize, compression); } public Writer createWriter(Configuration conf, FileSystem fs, String path) throws Exception { - if (logger.isDebugEnabled()) { - logger.debug("==> ORCFileUtil.createWriter()"); - } - Writer ret = null; + logger.debug("==> ORCFileUtil.createWriter()"); + WriterOptions writeOptions = OrcFile.writerOptions(conf) .fileSystem(fs) .setSchema(schema) @@ -99,151 +147,160 @@ public Writer createWriter(Configuration conf, FileSystem fs, String path) throw .stripeSize(orcStripeSize) 
.compress(compressionKind); - ret = OrcFile.createWriter(new Path(path), writeOptions); - if (logger.isDebugEnabled()) { - logger.debug("<== ORCFileUtil.createWriter()"); - } + Writer ret = OrcFile.createWriter(new Path(path), writeOptions); + + logger.debug("<== ORCFileUtil.createWriter()"); + return ret; } - public void close(Writer writer) throws Exception { - if (logger.isDebugEnabled()) { - logger.debug("==> ORCFileUtil.close()"); - } + public void close(Writer writer) throws Exception { + logger.debug("==> ORCFileUtil.close()"); writer.close(); - if (logger.isDebugEnabled()) { - logger.debug("<== ORCFileUtil.close()"); - } + logger.debug("<== ORCFileUtil.close()"); } public void log(Writer writer, Collection events) throws Exception { int eventBatchSize = events.size(); - if (logger.isDebugEnabled()) { - logger.debug("==> ORCFileUtil.log() : EventSize: " + eventBatchSize + "ORC bufferSize:" + orcBufferSize ); - } + logger.debug("==> ORCFileUtil.log() : EventSize: {} ORC bufferSize:{}", eventBatchSize, orcBufferSize); try { - for(AuthzAuditEvent event : events) { + for (AuthzAuditEvent event : events) { int row = batch.size++; - for (int j=0;j ORCWriter.initORCAuditSchema()"); - } + logger.debug("==> ORCWriter.initORCAuditSchema()"); + auditSchema = getAuditSchema(); - Map schemaFieldTypeMap = getSchemaFieldTypeMap(); + + Map schemaFieldTypeMap = getSchemaFieldTypeMap(); + schema = TypeDescription.fromString(auditSchema); batch = schema.createRowBatch(orcBufferSize); + buildVectorRowBatch(schemaFieldTypeMap); - if (logger.isDebugEnabled()) { - logger.debug("<== ORCWriter.initORCAuditSchema()"); - } + + logger.debug("<== ORCWriter.initORCAuditSchema()"); } - protected Map getSchemaFieldTypeMap() { - Map ret = new HashMap<>(); + protected Map getSchemaFieldTypeMap() { + Map ret = new HashMap<>(); + + int index1 = auditSchema.indexOf("<"); + int index2 = auditSchema.indexOf(">"); + String subAuditSchema = auditSchema.substring(index1 + 1, index2); + String[] 
fields = subAuditSchema.split(","); - int index1 = auditSchema.indexOf("<"); - int index2 = auditSchema.indexOf(">"); - String subAuditSchema = auditSchema.substring(index1+1,index2); - String[] fields = subAuditSchema.split(","); schemaFields = new ArrayList<>(); - for (String field: fields) { + for (String field : fields) { String[] flds = field.split(":"); + schemaFields.add(flds[0]); - ret.put(flds[0],flds[1]); + + ret.put(flds[0], flds[1]); } + return ret; } - protected void buildVectorRowBatch(Map schemaFieldTypeMap) throws Exception { - int i = 0; - for (i=0;i schemaFieldTypeMap) throws Exception { + for (int i = 0; i < schemaFields.size(); i++) { + String fld = schemaFields.get(i); + String fieldType = schemaFieldTypeMap.get(fld); ColumnVector columnVector = getColumnVectorType(fieldType); + if (columnVector instanceof LongColumnVector) { - vectorizedRowBatchMap.put(fld, (LongColumnVector) batch.cols[i]); + vectorizedRowBatchMap.put(fld, batch.cols[i]); } else if (columnVector instanceof BytesColumnVector) { - vectorizedRowBatchMap.put(fld, (BytesColumnVector) batch.cols[i]); + vectorizedRowBatchMap.put(fld, batch.cols[i]); } else if (columnVector instanceof DecimalColumnVector) { - vectorizedRowBatchMap.put(fld, (DecimalColumnVector) batch.cols[i]); + vectorizedRowBatchMap.put(fld, batch.cols[i]); } } } - protected SchemaInfo getFieldValue(AuthzAuditEvent event, String fieldName ) { + protected SchemaInfo getFieldValue(AuthzAuditEvent event, String fieldName) { SchemaInfo ret = new SchemaInfo(); + try { - Class aClass = AuthzAuditEvent.class; - Field fld = aClass.getDeclaredField(fieldName); + Class aClass = AuthzAuditEvent.class; + Field fld = aClass.getDeclaredField(fieldName); + fld.setAccessible(true); - Class cls = fld.getType(); - Object value = fld.get(event); + Class cls = fld.getType(); + Object value = fld.get(event); ret.setField(fieldName); ret.setType(cls.getName()); ret.setValue(value); - } catch (Exception e){ + } catch (Exception e) { 
logger.error("Error while writing into ORC File:", e); } return ret; } protected ColumnVector getColumnVectorType(String fieldType) throws Exception { - ColumnVector ret = null; + final ColumnVector ret; + fieldType = fieldType.toLowerCase(); - switch(fieldType) { - case "int" : + + switch (fieldType) { + case "int": case "bigint": case "date": case "boolean": @@ -253,10 +310,10 @@ protected ColumnVector getColumnVectorType(String fieldType) throws Exception { case "varchar": case "char": case "binary": - ret = new BytesColumnVector(); + ret = new BytesColumnVector(); break; case "decimal": - ret = new DecimalColumnVector(10,5); + ret = new DecimalColumnVector(10, 5); break; case "double": case "float": @@ -267,75 +324,87 @@ protected ColumnVector getColumnVectorType(String fieldType) throws Exception { case "uniontype": case "struct": throw new Exception("Unsuppoted field Type"); + default: + ret = null; + break; } + return ret; } protected Long castLongObject(Object object) { - Long ret = 0l; + long ret = 0L; + try { - if (object instanceof Long) + if (object instanceof Long) { ret = ((Long) object); - else if (object instanceof Integer) { + } else if (object instanceof Integer) { ret = ((Integer) object).longValue(); } else if (object instanceof String) { - ret = Long.valueOf((String) object); + ret = Long.parseLong((String) object); } } catch (Exception e) { logger.error("Error while writing into ORC File:", e); } + return ret; } protected String castStringObject(Object object) { String ret = null; + try { - if (object instanceof String) + if (object instanceof String) { ret = (String) object; - else if (object instanceof Date) { + } else if (object instanceof Date) { ret = (getDateString((Date) object)); } } catch (Exception e) { logger.error("Error while writing into ORC File:", e); } + return ret; } - protected String getAuditSchema() { - if (logger.isDebugEnabled()) { - logger.debug("==> ORCWriter.getAuditSchema()"); - } - String ret = null; - String 
fieldStr = "struct<"; - StringBuilder sb = new StringBuilder(fieldStr); + protected String getAuditSchema() { + logger.debug("==> ORCWriter.getAuditSchema()"); + + String fieldStr = "struct<"; + StringBuilder sb = new StringBuilder(fieldStr); + Class auditEventClass = AuthzAuditEvent.class; - Class auditEventClass = AuthzAuditEvent.class; - for(Field fld: auditEventClass.getDeclaredFields()) { + for (Field fld : auditEventClass.getDeclaredFields()) { if (fld.isAnnotationPresent(JsonProperty.class)) { String field = fld.getName(); String fieldType = getShortFieldType(fld.getType().getName()); + if (fieldType == null) { continue; } + fieldStr = field + ":" + fieldType + ","; + sb.append(fieldStr); } } + fieldStr = sb.toString(); + if (fieldStr.endsWith(",")) { fieldStr = fieldStr.substring(0, fieldStr.length() - 1); } - ret = fieldStr + ">"; - if (logger.isDebugEnabled()) { - logger.debug("<== ORCWriter.getAuditSchema() AuditSchema: " + ret); - } + String ret = fieldStr + ">"; + + logger.debug("<== ORCWriter.getAuditSchema() AuditSchema: {}", ret); + return ret; } - protected String getShortFieldType(String type){ - String ret = null; - switch(type) { + protected String getShortFieldType(String type) { + final String ret; + + switch (type) { case "java.lang.String": ret = "string"; break; @@ -349,50 +418,24 @@ protected String getShortFieldType(String type){ ret = "string"; break; case "long": - ret = "bigint"; + ret = "bigint"; break; default: ret = null; - } - return ret; - } - - class SchemaInfo { - String field = null; - String type = null; - Object value = null; - - public String getField() { - return field; - } - - public void setField(String field) { - this.field = field; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public Object getValue() { - return value; + break; } - public void setValue(Object value) { - this.value = value; - } + return ret; } protected CompressionKind 
getORCCompression(String compression) { - CompressionKind ret; + final CompressionKind ret; + if (compression == null) { compression = defaultCompression.name().toLowerCase(); } - switch(compression) { + + switch (compression) { case "snappy": ret = CompressionKind.SNAPPY; break; @@ -409,41 +452,37 @@ protected CompressionKind getORCCompression(String compression) { ret = defaultCompression; break; } + return ret; } - public static void main(String[] args) throws Exception { - ORCFileUtil auditOrcFileUtil = new ORCFileUtil(); - auditOrcFileUtil.init(10000,100000L,"snappy"); - try { - Configuration conf = new Configuration(); - FileSystem fs = FileSystem.get(conf); - Writer write = auditOrcFileUtil.createWriter(conf, fs, "/tmp/test.orc"); - Collection events = getTestEvent(); - auditOrcFileUtil.log(write, events); - write.close(); - } catch (Exception e){ - e.printStackTrace(); + static class SchemaInfo { + String field; + String type; + Object value; + + public String getField() { + return field; } - } - protected static Collection getTestEvent() { - Collection events = new ArrayList<>(); - for (int idx=0;idx<20;idx++) { - AuthzAuditEvent event = new AuthzAuditEvent(); - event.setEventId(Integer.toString(idx)); - event.setClientIP("127.0.0.1"); - event.setAccessResult((short) 1); - event.setAclEnforcer("ranger-acl"); - event.setRepositoryName("hdfsdev"); - event.setRepositoryType(EnumRepositoryType.HDFS); - event.setResourcePath("/tmp/test-audit.log" +idx+idx+1); - event.setResourceType("file"); - event.setAccessType("read"); - event.setEventTime(new Date()); - event.setResultReason(Integer.toString(1)); - events.add(event); + public void setField(String field) { + this.field = field; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Object getValue() { + return value; + } + + public void setValue(Object value) { + this.value = value; } - return events; } -} \ No newline at end of file +} 
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerAuditWriter.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerAuditWriter.java index fbe9301f47..ab61623fb7 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerAuditWriter.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerAuditWriter.java @@ -25,7 +25,7 @@ import java.util.Properties; public interface RangerAuditWriter { - void init(Properties prop, String propPrefix, String auditProviderName, Map auditConfigs); + void init(Properties prop, String propPrefix, String auditProviderName, Map auditConfigs); boolean log(Collection events) throws Exception; diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerJSONAuditWriter.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerJSONAuditWriter.java index f74f0cbd32..eb7a2a78cc 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerJSONAuditWriter.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerJSONAuditWriter.java @@ -26,103 +26,113 @@ import java.io.File; import java.io.PrintWriter; import java.security.PrivilegedExceptionAction; +import java.util.Collection; +import java.util.Map; +import java.util.Properties; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; -import java.util.Collection; -import java.util.Map; -import java.util.Properties; /** * Writes the Ranger audit to HDFS as JSON text */ public class RangerJSONAuditWriter extends AbstractRangerAuditWriter { - private static final Logger logger = LoggerFactory.getLogger(RangerJSONAuditWriter.class); - public static final String PROP_HDFS_ROLLOVER_ENABLE_PERIODIC_ROLLOVER = "file.rollover.enable.periodic.rollover"; - public static final String PROP_HDFS_ROLLOVER_PERIODIC_ROLLOVER_CHECK_TIME = 
"file.rollover.periodic.rollover.check.sec"; - protected String JSON_FILE_EXTENSION = ".log"; + public static final String PROP_HDFS_ROLLOVER_ENABLE_PERIODIC_ROLLOVER = "file.rollover.enable.periodic.rollover"; + public static final String PROP_HDFS_ROLLOVER_PERIODIC_ROLLOVER_CHECK_TIME = "file.rollover.periodic.rollover.check.sec"; + + protected static final String JSON_FILE_EXTENSION = ".log"; /* * When enableAuditFilePeriodicRollOver is enabled, Audit File in HDFS would be closed by the defined period in * xasecure.audit.destination.hdfs.file.rollover.sec. By default xasecure.audit.destination.hdfs.file.rollover.sec = 86400 sec * and file will be closed midnight. Custom rollover time can be set by defining file.rollover.sec to desire time in seconds. */ - private boolean enableAuditFilePeriodicRollOver = false; + private boolean enableAuditFilePeriodicRollOver; /* Time frequency of next occurrence of periodic rollover check. By Default every 60 seconds the check is done. */ private long periodicRollOverCheckTimeinSec; - public void init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) { - if (logger.isDebugEnabled()) { - logger.debug("==> RangerJSONAuditWriter.init()"); - } + public void init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) { + logger.debug("==> RangerJSONAuditWriter.init()"); + init(); - super.init(props,propPrefix,auditProviderName,auditConfigs); + + super.init(props, propPrefix, auditProviderName, auditConfigs); // start AuditFilePeriodicRollOverTask if enabled. - enableAuditFilePeriodicRollOver = MiscUtil.getBooleanProperty(props, propPrefix + "." + PROP_HDFS_ROLLOVER_ENABLE_PERIODIC_ROLLOVER, false); + enableAuditFilePeriodicRollOver = MiscUtil.getBooleanProperty(props, propPrefix + "." + PROP_HDFS_ROLLOVER_ENABLE_PERIODIC_ROLLOVER, false); if (enableAuditFilePeriodicRollOver) { periodicRollOverCheckTimeinSec = MiscUtil.getLongProperty(props, propPrefix + "." 
+ PROP_HDFS_ROLLOVER_PERIODIC_ROLLOVER_CHECK_TIME, 60L); + try { - if (logger.isDebugEnabled()) { - logger.debug("rolloverPeriod: " + rolloverPeriod + " nextRollOverTime: " + nextRollOverTime + " periodicRollOverTimeinSec: " + periodicRollOverCheckTimeinSec); - } + logger.debug("rolloverPeriod: {} nextRollOverTime: {} periodicRollOverTimeinSec: {}", rolloverPeriod, nextRollOverTime, periodicRollOverCheckTimeinSec); + startAuditFilePeriodicRollOverTask(); } catch (Exception e) { logger.warn("Error enabling audit file perodic rollover..! Default behavior will be"); } } - if (logger.isDebugEnabled()) { - logger.debug("<== RangerJSONAuditWriter.init()"); - } + logger.debug("<== RangerJSONAuditWriter.init()"); + } + + public void flush() { + logger.debug("==> JSONWriter.flush() called. name={}", auditProviderName); + + super.flush(); + + logger.debug("<== JSONWriter.flush()"); } public void init() { setFileExtension(JSON_FILE_EXTENSION); } - synchronized public boolean logJSON(final Collection events) throws Exception { + public synchronized boolean logJSON(final Collection events) throws Exception { PrintWriter out = null; + try { - if (logger.isDebugEnabled()) { - logger.debug("UGI = {}, will write to HDFS file = {}", MiscUtil.getUGILoginUser(), currentFileName); - } - out = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction() { - @Override - public PrintWriter run() throws Exception { - PrintWriter out = getLogFileStream(); - for (String event : events) { - out.println(event); - } - return out; - }; + logger.debug("UGI = {}, will write to HDFS file = {}", MiscUtil.getUGILoginUser(), currentFileName); + + out = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { + PrintWriter out1 = getLogFileStream(); + + for (String event : events) { + out1.println(event); + } + + return out1; }); + // flush and check the stream for errors if (out.checkError()) { // In theory, this count may NOT be accurate as part of the messages may have been 
successfully written. // However, in practice, since client does buffering, either all or none would succeed. logger.error("Stream encountered errors while writing audits to HDFS!"); + closeWriter(); resetWriter(); + reUseLastLogFile = true; + return false; } } catch (Exception e) { logger.error("Exception encountered while writing audits to HDFS!", e); closeWriter(); resetWriter(); + reUseLastLogFile = true; + return false; } finally { - if (logger.isDebugEnabled()) { - logger.debug("Flushing HDFS audit. Event Size:" + events.size()); - } + logger.debug("Flushing HDFS audit. Event Size:{}", events.size()); + if (out != null) { out.flush(); } @@ -133,113 +143,92 @@ public PrintWriter run() throws Exception { } @Override - public boolean log(Collection events) throws Exception { + public boolean log(Collection events) throws Exception { return logJSON(events); } - synchronized public boolean logAsFile(final File file) throws Exception { - boolean ret = false; - if (logger.isDebugEnabled()) { - logger.debug("UGI=" + MiscUtil.getUGILoginUser() - + ". Will write to HDFS file=" + currentFileName); - } - Boolean retVal = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction() { - @Override - public Boolean run() throws Exception { - boolean ret = logFileToHDFS(file); - return Boolean.valueOf(ret); - }; - }); - ret = retVal.booleanValue(); - logger.info("Flushing HDFS audit File :" + file.getAbsolutePath() + file.getName()); - return ret; - } - @Override public boolean logFile(File file) throws Exception { return logAsFile(file); } - synchronized public PrintWriter getLogFileStream() throws Exception { - if (!enableAuditFilePeriodicRollOver) { - // when periodic rollover is enabled closing of file is done by the file rollover monitoring task and hence don't need to - // close the file inline with audit logging. 
- closeFileIfNeeded(); - } - // Either there are no open log file or the previous one has been rolled over - PrintWriter logWriter = createWriter(); - return logWriter; - } - - - public void flush() { - if (logger.isDebugEnabled()) { - logger.debug("==> JSONWriter.flush() called. name=" + auditProviderName); - } - super.flush(); - if (logger.isDebugEnabled()) { - logger.debug("<== JSONWriter.flush()"); - } - } - @Override public void start() { // nothing to start } @Override - synchronized public void stop() { - if (logger.isDebugEnabled()) { - logger.debug("==> JSONWriter.stop()"); - } + public synchronized void stop() { + logger.debug("==> JSONWriter.stop()"); + if (logWriter != null) { try { logWriter.flush(); logWriter.close(); } catch (Throwable t) { - logger.error("Error on closing log writter. Exception will be ignored. name=" - + auditProviderName + ", fileName=" + currentFileName); + logger.error("Error on closing log writter. Exception will be ignored. name={}, fileName={}", auditProviderName, currentFileName); } + logWriter = null; - ostream = null; + ostream = null; } - if (logger.isDebugEnabled()) { - logger.debug("<== JSONWriter.stop()"); + + logger.debug("<== JSONWriter.stop()"); + } + + public synchronized boolean logAsFile(final File file) throws Exception { + logger.debug("UGI={}. Will write to HDFS file={}", MiscUtil.getUGILoginUser(), currentFileName); + + boolean ret = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> logFileToHDFS(file)); + + logger.info("Flushing HDFS audit File :{}{}", file.getAbsolutePath(), file.getName()); + + return ret; + } + + public synchronized PrintWriter getLogFileStream() throws Exception { + if (!enableAuditFilePeriodicRollOver) { + // when periodic rollover is enabled closing of file is done by the file rollover monitoring task and hence don't need to + // close the file inline with audit logging. 
+ closeFileIfNeeded(); } + + // Either there are no open log file or the previous one has been rolled over + return createWriter(); } private void startAuditFilePeriodicRollOverTask() { ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(new AuditFilePeriodicRollOverTaskThreadFactory()); - if (logger.isDebugEnabled()) { - logger.debug("HDFSAuditDestination.startAuditFilePeriodicRollOverTask() strated.." + "Audit File rollover happens every " + rolloverPeriod ); - } + logger.debug("HDFSAuditDestination.startAuditFilePeriodicRollOverTask() strated..Audit File rollover happens every {}", rolloverPeriod); executorService.scheduleAtFixedRate(new AuditFilePeriodicRollOverTask(), 0, periodicRollOverCheckTimeinSec, TimeUnit.SECONDS); } - class AuditFilePeriodicRollOverTaskThreadFactory implements ThreadFactory { + static class AuditFilePeriodicRollOverTaskThreadFactory implements ThreadFactory { //Threadfactory to create a daemon Thread. + @Override public Thread newThread(Runnable r) { Thread t = new Thread(r, "AuditFilePeriodicRollOverTask"); + t.setDaemon(true); + return t; } } private class AuditFilePeriodicRollOverTask implements Runnable { + @Override public void run() { - if (logger.isDebugEnabled()) { - logger.debug("==> AuditFilePeriodicRollOverTask.run()"); - } + logger.debug("==> AuditFilePeriodicRollOverTask.run()"); + try { closeFileIfNeeded(); } catch (Exception excp) { logger.error("AuditFilePeriodicRollOverTask Failed. 
Aborting..", excp); } - if (logger.isDebugEnabled()) { - logger.debug("<== AuditFilePeriodicRollOverTask.run()"); - } + + logger.debug("<== AuditFilePeriodicRollOverTask.run()"); } } -} \ No newline at end of file +} diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerORCAuditWriter.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerORCAuditWriter.java index b7cddfc2fd..b60ae77852 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerORCAuditWriter.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerORCAuditWriter.java @@ -33,83 +33,93 @@ import java.util.Properties; /** - * This class writes the Ranger audits to HDFS as ORC files - * Refer README.TXT for enabling ORCWriter. + * This class writes the Ranger audits to HDFS as ORC files + * Refer README.TXT for enabling ORCWriter. */ public class RangerORCAuditWriter extends AbstractRangerAuditWriter { private static final Logger logger = LoggerFactory.getLogger(RangerORCAuditWriter.class); protected static final String ORC_FILE_EXTENSION = ".orc"; - protected volatile ORCFileUtil orcFileUtil = null; - protected Writer orcLogWriter = null; - protected String fileType = "orc"; - protected String compression = null; - protected int orcBufferSize = 0; - protected int defaultbufferSize = 100000; - protected long orcStripeSize = 0; - protected long defaultStripeSize = 100000L; + + protected volatile ORCFileUtil orcFileUtil; + + protected Writer orcLogWriter; + protected String fileType = "orc"; + protected String compression; + protected int orcBufferSize; + protected int defaultbufferSize = 100000; + protected long orcStripeSize; + protected long defaultStripeSize = 100000L; @Override - public void init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) { - if (logger.isDebugEnabled()) { - logger.debug("==> RangerORCAuditWriter.init()"); - } - init(props,propPrefix,auditProviderName); + public void 
init(Properties props, String propPrefix, String auditProviderName, Map auditConfigs) { + logger.debug("==> RangerORCAuditWriter.init()"); + + init(props, propPrefix, auditProviderName); + super.init(props, propPrefix, auditProviderName, auditConfigs); - if (logger.isDebugEnabled()) { - logger.debug("<== RangerORCAuditWriter.init()"); - } + + logger.debug("<== RangerORCAuditWriter.init()"); + } + + @Override + public void flush() { + //For HDFSAuditDestionation with ORC format each file is flushed immediately after writing the ORC batch. + //So nothing to flush. } - synchronized public boolean logAuditAsORC(final Collection events) throws Exception { + public synchronized boolean logAuditAsORC(final Collection events) throws Exception { boolean ret = false; Writer out = null; + try { if (logger.isDebugEnabled()) { - logger.debug("UGI=" + MiscUtil.getUGILoginUser() - + ". Will write to HDFS file=" + currentFileName); + logger.debug("UGI={}. Will write to HDFS file={}", MiscUtil.getUGILoginUser(), currentFileName); } - out = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction() { - @Override - public Writer run() throws Exception { - Writer out = getORCFileWrite(); - orcFileUtil.log(out,events); - return out; - } + out = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { + Writer out1 = getORCFileWrite(); + + orcFileUtil.log(out1, events); + + return out1; }); } catch (Exception e) { orcLogWriter = null; + logger.error("Error while writing into ORC FileWriter", e); + throw e; } finally { - if (logger.isDebugEnabled()) { - logger.debug("Flushing HDFS audit in ORC Format. Event Size:" + events.size()); - } + logger.debug("Flushing HDFS audit in ORC Format. 
Event Size:{}", events.size()); + if (out != null) { try { //flush and close the ORC batch file orcFileUtil.close(out); + ret = true; } catch (Exception e) { logger.error("Error while closing the ORC FileWriter", e); + throw e; } + orcLogWriter = null; } } + return ret; } @Override - public void flush() { - //For HDFSAuditDestionation with ORC format each file is flushed immediately after writing the ORC batch. - //So nothing to flush. + public boolean log(Collection events) throws Exception { + return logAsORC(events); } @Override - public boolean log(Collection events) throws Exception { - return logAsORC(events); + public boolean logFile(File file) throws Exception { + return false; } @Override @@ -123,51 +133,30 @@ public synchronized void stop() { try { orcFileUtil.close(orcLogWriter); } catch (Throwable t) { - logger.error("Error on closing log ORC Writer. Exception will be ignored. name=" - + auditProviderName + ", fileName=" + currentFileName); + logger.error("Error on closing log ORC Writer. Exception will be ignored. name={}, fileName={}", auditProviderName, currentFileName); } - orcLogWriter = null; - } - } - - @Override - public boolean logFile(File file) throws Exception { - return false; - } - // Creates ORC Write file - protected synchronized Writer getORCFileWrite() throws Exception { - if (logger.isDebugEnabled()) { - logger.debug("==> RangerORCAuditWriter.getORCFileWrite()"); - } - if (orcLogWriter == null) { - // Create the file to write - createFileSystemFolders(); - logger.info("Creating new log file. 
hdfPath=" + fullPath); - orcLogWriter = orcFileUtil.createWriter(conf, fileSystem, fullPath); - currentFileName = fullPath; - } - if (logger.isDebugEnabled()) { - logger.debug("<== RangerORCAuditWriter.getORCFileWrite()"); + orcLogWriter = null; } - return orcLogWriter; } - public boolean logAsORC(Collection events) throws Exception { - boolean ret = false; + public boolean logAsORC(Collection events) throws Exception { Collection authzAuditEvents = getAuthzAuditEvents(events); - ret = logAuditAsORC(authzAuditEvents); - return ret; + + return logAuditAsORC(authzAuditEvents); } - public Collection getAuthzAuditEvents(Collection events) throws Exception { + public Collection getAuthzAuditEvents(Collection events) { Collection ret = new ArrayList<>(); + for (String event : events) { try { AuthzAuditEvent authzAuditEvent = MiscUtil.fromJson(event, AuthzAuditEvent.class); + ret.add(authzAuditEvent); } catch (Exception e) { - logger.error("Error converting to From JSON to AuthzAuditEvent=" + event); + logger.error("Error converting to From JSON to AuthzAuditEvent={}", event); + throw e; } } @@ -175,15 +164,37 @@ public Collection getAuthzAuditEvents(Collection events } public void init(Properties props, String propPrefix, String auditProviderName) { - compression = MiscUtil.getStringProperty(props, propPrefix + "." + fileType +".compression"); - orcBufferSize = MiscUtil.getIntProperty(props, propPrefix + "." + fileType +".buffersize",defaultbufferSize); - orcStripeSize = MiscUtil.getLongProperty(props, propPrefix + "." + fileType +".stripesize",defaultStripeSize); + compression = MiscUtil.getStringProperty(props, propPrefix + "." + fileType + ".compression"); + orcBufferSize = MiscUtil.getIntProperty(props, propPrefix + "." + fileType + ".buffersize", defaultbufferSize); + orcStripeSize = MiscUtil.getLongProperty(props, propPrefix + "." 
+ fileType + ".stripesize", defaultStripeSize); + setFileExtension(ORC_FILE_EXTENSION); + try { orcFileUtil = ORCFileUtil.getInstance(); + orcFileUtil.init(orcBufferSize, orcStripeSize, compression); - } catch ( Exception e) { + } catch (Exception e) { logger.error("Error while doing ORCWriter.init() ", e); } } -} \ No newline at end of file + + // Creates ORC Write file + protected synchronized Writer getORCFileWrite() throws Exception { + logger.debug("==> RangerORCAuditWriter.getORCFileWrite()"); + + if (orcLogWriter == null) { + // Create the file to write + createFileSystemFolders(); + + logger.info("Creating new log file. hdfPath={}", fullPath); + + orcLogWriter = orcFileUtil.createWriter(conf, fileSystem, fullPath); + currentFileName = fullPath; + } + + logger.debug("<== RangerORCAuditWriter.getORCFileWrite()"); + + return orcLogWriter; + } +} diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RollingTimeUtil.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RollingTimeUtil.java index f59817f7c6..dee3f63490 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RollingTimeUtil.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RollingTimeUtil.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,252 +18,252 @@ package org.apache.ranger.audit.utils; +import org.apache.commons.lang.StringUtils; + import java.util.Calendar; import java.util.Date; -import org.apache.commons.lang.StringUtils; - public class RollingTimeUtil { - public static final String MINUTES ="m"; //minutes - public static final String HOURS ="h"; //hours - public static final String DAYS ="d"; //days - public static final String WEEKS ="w"; //weeks - public static final String MONTHS ="M"; //months - public static final String YEARS ="y"; //years - - private static volatile RollingTimeUtil me = null; - - public static RollingTimeUtil getInstance() { - RollingTimeUtil result = me; - if ( result == null) { - synchronized(RollingTimeUtil.class) { - result = me; - if ( result == null){ - me = result = new RollingTimeUtil(); - } - } - } - return result; - } - - public RollingTimeUtil() { - } - - public Date computeNextRollingTime(String rollingTimePeriod) throws Exception{ - Date ret = null; - - if (!StringUtils.isEmpty(rollingTimePeriod)) { - String computePeriod = getTimeLiteral(rollingTimePeriod); - int timeNumeral = getTimeNumeral(rollingTimePeriod,computePeriod); - switch(computePeriod) { - case MINUTES: - ret = computeTopOfMinuteDate(timeNumeral); - break; - case HOURS: - ret = computeTopOfHourDate(timeNumeral); - break; - case DAYS: - ret = computeTopOfDayDate(timeNumeral); - break; - case WEEKS: - ret = computeTopOfWeekDate(timeNumeral); - break; - case MONTHS: - ret = computeTopofMonthDate(timeNumeral); - break; - case YEARS: - ret = computeTopOfYearDate(timeNumeral); - break; - } - } else { - throw new Exception("Unable to compute Next Rolling using the given Rollover period"); - } - return ret; - } - - public String convertRolloverSecondsToRolloverPeriod(long duration) 
{ - final int SECONDS_IN_MINUTE = 60; - final int SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE; - final int SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR; - - String ret = null; - int days = (int) (duration / SECONDS_IN_DAY); - duration %= SECONDS_IN_DAY; - int hours = (int) (duration / SECONDS_IN_HOUR); - duration %= SECONDS_IN_HOUR; - int minutes = (int) (duration / SECONDS_IN_MINUTE); - - if(days != 0) { - if(hours == 0 && minutes == 0) { - ret = (days + DAYS); - } - } else if(hours != 0) { - if(minutes == 0) { - ret = (hours + HOURS); - } - } else if(minutes != 0) { - ret = (minutes + MINUTES); - } - return ret; - } - - public long computeNextRollingTime(long durationSeconds, Date previousRolloverTime) { - long now = System.currentTimeMillis(); - long nextRolloverTime = (previousRolloverTime == null) ? now : previousRolloverTime.getTime(); - long durationMillis = (durationSeconds < 1 ? 1 : durationSeconds) * 1000; - - while( nextRolloverTime <= now ) { - nextRolloverTime += durationMillis; - } - - return nextRolloverTime; - } - - private Date computeTopOfYearDate( int years){ - Date ret = null; - - Calendar calendarStart=Calendar.getInstance(); - calendarStart.add(Calendar.YEAR,years); - calendarStart.set(Calendar.MONTH,0); - calendarStart.set(Calendar.DAY_OF_MONTH,1); - calendarStart.set(Calendar.HOUR_OF_DAY,0); - calendarStart.clear(Calendar.MINUTE); - calendarStart.clear(Calendar.SECOND); - calendarStart.clear(Calendar.MILLISECOND); - - ret = calendarStart.getTime(); - - return ret; - } - - private Date computeTopofMonthDate(int months){ - - Date ret = null; - - Calendar calendarMonth=Calendar.getInstance(); - calendarMonth.set(Calendar.DAY_OF_MONTH,1); - calendarMonth.add(Calendar.MONTH, months); - calendarMonth.set(Calendar.HOUR_OF_DAY, 0); - calendarMonth.clear(Calendar.MINUTE); - calendarMonth.clear(Calendar.SECOND); - calendarMonth.clear(Calendar.MILLISECOND); - - ret = calendarMonth.getTime(); - - return ret; - } - - private Date computeTopOfWeekDate(int weeks) { 
- Date ret = null; - - Calendar calendarWeek=Calendar.getInstance(); - calendarWeek.set(Calendar.DAY_OF_WEEK,calendarWeek.getFirstDayOfWeek()); - calendarWeek.add(Calendar.WEEK_OF_YEAR,weeks); - calendarWeek.set(Calendar.HOUR_OF_DAY,0); - calendarWeek.clear(Calendar.MINUTE); - calendarWeek.clear(Calendar.SECOND); - calendarWeek.clear(Calendar.MILLISECOND); - - ret=calendarWeek.getTime(); - - return ret; - } - - private Date computeTopOfDayDate(int days){ - - Date ret = null; - - Calendar calendarDay=Calendar.getInstance(); - calendarDay.add(Calendar.DAY_OF_MONTH, days); - calendarDay.set(Calendar.HOUR_OF_DAY, 0); - calendarDay.clear(Calendar.MINUTE); - calendarDay.clear(Calendar.SECOND); - calendarDay.clear(Calendar.MILLISECOND); - - ret = calendarDay.getTime(); - - return ret; - - } - - private Date computeTopOfHourDate(int hours) { - Date ret = null; - - Calendar calendarHour=Calendar.getInstance(); - calendarHour.add(Calendar.HOUR_OF_DAY, hours); - calendarHour.clear(Calendar.MINUTE); - calendarHour.clear(Calendar.SECOND); - calendarHour.clear(Calendar.MILLISECOND); - - ret = calendarHour.getTime(); - - return ret; - } - - private Date computeTopOfMinuteDate(int mins) { - Date ret = null; - - Calendar calendarMin=Calendar.getInstance(); - calendarMin.add(Calendar.MINUTE,mins); - calendarMin.clear(Calendar.SECOND); - calendarMin.clear(Calendar.MILLISECOND); - - ret = calendarMin.getTime(); - - return ret; - } - - private int getTimeNumeral(String rollOverPeriod, String timeLiteral) throws Exception { - - int ret = Integer.valueOf(rollOverPeriod.substring(0, rollOverPeriod.length() - (rollOverPeriod.length() - rollOverPeriod.indexOf(timeLiteral)))); - - return ret; - } - - private String getTimeLiteral(String rollOverPeriod) throws Exception { - String ret = null; - if(StringUtils.isEmpty(rollOverPeriod)) { - throw new Exception("empty rollover period"); - } else if(rollOverPeriod.endsWith(MINUTES)) { - ret = MINUTES; - } else if(rollOverPeriod.endsWith(HOURS)) { 
- ret = HOURS; - } else if(rollOverPeriod.endsWith(DAYS)) { - ret = DAYS; - } else if(rollOverPeriod.endsWith(WEEKS)) { - ret = WEEKS; - } else if(rollOverPeriod.endsWith(MONTHS)) { - ret = MONTHS; - } else if(rollOverPeriod.endsWith(YEARS)) { - ret = YEARS; - } else { - throw new Exception(rollOverPeriod + ": invalid rollover period"); - } - return ret; - } - - public static void main(String[] args) { - // Test Method for RolloverTime calculation - // Set rollOverPeriod 10m,30m..,1h,2h,..1d,2d..,1w,2w..,1M,2M..1y..2y - // If nothing is set for rollOverPeriod or Duration default rollOverPeriod is 1 day - String rollOverPeriod = ""; - RollingTimeUtil rollingTimeUtil = new RollingTimeUtil(); - int duration = 86400; - Date nextRollOvertime = null; - - try { - nextRollOvertime = rollingTimeUtil.computeNextRollingTime(rollOverPeriod); - } catch (Exception e) { - rollOverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(duration); - System.out.println(rollOverPeriod); - try { - nextRollOvertime = rollingTimeUtil.computeNextRollingTime(rollOverPeriod); - System.out.println(nextRollOvertime); - } catch (Exception e1) { - e1.printStackTrace(); - } - long rollOverTime = rollingTimeUtil.computeNextRollingTime(duration, null); - nextRollOvertime = new Date(rollOverTime); - } - } + public static final String MINUTES = "m"; //minutes + public static final String HOURS = "h"; //hours + public static final String DAYS = "d"; //days + public static final String WEEKS = "w"; //weeks + public static final String MONTHS = "M"; //months + public static final String YEARS = "y"; //years + + private static final int SECONDS_IN_MINUTE = 60; + private static final int SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE; + private static final int SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR; + + private static volatile RollingTimeUtil me; + + public RollingTimeUtil() { + } + + public static RollingTimeUtil getInstance() { + RollingTimeUtil result = me; + + if (result == null) { + synchronized 
(RollingTimeUtil.class) { + result = me; + + if (result == null) { + result = new RollingTimeUtil(); + me = result; + } + } + } + + return result; + } + + public static void main(String[] args) { + // Test Method for RolloverTime calculation + // Set rollOverPeriod 10m,30m..,1h,2h,..1d,2d..,1w,2w..,1M,2M..1y..2y + // If nothing is set for rollOverPeriod or Duration default rollOverPeriod is 1 day + String rollOverPeriod = ""; + RollingTimeUtil rollingTimeUtil = new RollingTimeUtil(); + int duration = 86400; + Date nextRollOvertime; + + try { + nextRollOvertime = rollingTimeUtil.computeNextRollingTime(rollOverPeriod); + } catch (Exception e) { + rollOverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(duration); + + System.out.println(rollOverPeriod); + + try { + nextRollOvertime = rollingTimeUtil.computeNextRollingTime(rollOverPeriod); + + System.out.println(nextRollOvertime); + } catch (Exception e1) { + e1.printStackTrace(); + } + + long rollOverTime = rollingTimeUtil.computeNextRollingTime(duration, null); + + nextRollOvertime = new Date(rollOverTime); + } + + System.out.println(nextRollOvertime); + } + + public Date computeNextRollingTime(String rollingTimePeriod) throws Exception { + final Date ret; + + if (!StringUtils.isEmpty(rollingTimePeriod)) { + String computePeriod = getTimeLiteral(rollingTimePeriod); + int timeNumeral = getTimeNumeral(rollingTimePeriod, computePeriod); + + switch (computePeriod) { + case MINUTES: + ret = computeTopOfMinuteDate(timeNumeral); + break; + case HOURS: + ret = computeTopOfHourDate(timeNumeral); + break; + case DAYS: + ret = computeTopOfDayDate(timeNumeral); + break; + case WEEKS: + ret = computeTopOfWeekDate(timeNumeral); + break; + case MONTHS: + ret = computeTopofMonthDate(timeNumeral); + break; + case YEARS: + ret = computeTopOfYearDate(timeNumeral); + break; + default: + ret = null; + break; + } + } else { + throw new Exception("Unable to compute Next Rolling using the given Rollover period"); + } + + 
return ret; + } + + public String convertRolloverSecondsToRolloverPeriod(long duration) { + int days = (int) (duration / SECONDS_IN_DAY); + + duration %= SECONDS_IN_DAY; + + int hours = (int) (duration / SECONDS_IN_HOUR); + + duration %= SECONDS_IN_HOUR; + + int minutes = (int) (duration / SECONDS_IN_MINUTE); + + String ret = null; + + if (days != 0) { + if (hours == 0 && minutes == 0) { + ret = (days + DAYS); + } + } else if (hours != 0) { + if (minutes == 0) { + ret = (hours + HOURS); + } + } else if (minutes != 0) { + ret = (minutes + MINUTES); + } + + return ret; + } + + public long computeNextRollingTime(long durationSeconds, Date previousRolloverTime) { + long now = System.currentTimeMillis(); + long nextRolloverTime = (previousRolloverTime == null) ? now : previousRolloverTime.getTime(); + long durationMillis = (durationSeconds < 1 ? 1 : durationSeconds) * 1000; + + while (nextRolloverTime <= now) { + nextRolloverTime += durationMillis; + } + + return nextRolloverTime; + } + + private Date computeTopOfYearDate(int years) { + Calendar calendarStart = Calendar.getInstance(); + + calendarStart.add(Calendar.YEAR, years); + calendarStart.set(Calendar.MONTH, 0); + calendarStart.set(Calendar.DAY_OF_MONTH, 1); + calendarStart.set(Calendar.HOUR_OF_DAY, 0); + calendarStart.clear(Calendar.MINUTE); + calendarStart.clear(Calendar.SECOND); + calendarStart.clear(Calendar.MILLISECOND); + + return calendarStart.getTime(); + } + + private Date computeTopofMonthDate(int months) { + Calendar calendarMonth = Calendar.getInstance(); + + calendarMonth.set(Calendar.DAY_OF_MONTH, 1); + calendarMonth.add(Calendar.MONTH, months); + calendarMonth.set(Calendar.HOUR_OF_DAY, 0); + calendarMonth.clear(Calendar.MINUTE); + calendarMonth.clear(Calendar.SECOND); + calendarMonth.clear(Calendar.MILLISECOND); + + return calendarMonth.getTime(); + } + + private Date computeTopOfWeekDate(int weeks) { + Calendar calendarWeek = Calendar.getInstance(); + + calendarWeek.set(Calendar.DAY_OF_WEEK, 
calendarWeek.getFirstDayOfWeek()); + calendarWeek.add(Calendar.WEEK_OF_YEAR, weeks); + calendarWeek.set(Calendar.HOUR_OF_DAY, 0); + calendarWeek.clear(Calendar.MINUTE); + calendarWeek.clear(Calendar.SECOND); + calendarWeek.clear(Calendar.MILLISECOND); + + return calendarWeek.getTime(); + } + + private Date computeTopOfDayDate(int days) { + Calendar calendarDay = Calendar.getInstance(); + + calendarDay.add(Calendar.DAY_OF_MONTH, days); + calendarDay.set(Calendar.HOUR_OF_DAY, 0); + calendarDay.clear(Calendar.MINUTE); + calendarDay.clear(Calendar.SECOND); + calendarDay.clear(Calendar.MILLISECOND); + + return calendarDay.getTime(); + } + + private Date computeTopOfHourDate(int hours) { + Calendar calendarHour = Calendar.getInstance(); + + calendarHour.add(Calendar.HOUR_OF_DAY, hours); + calendarHour.clear(Calendar.MINUTE); + calendarHour.clear(Calendar.SECOND); + calendarHour.clear(Calendar.MILLISECOND); + + return calendarHour.getTime(); + } + + private Date computeTopOfMinuteDate(int mins) { + Calendar calendarMin = Calendar.getInstance(); + + calendarMin.add(Calendar.MINUTE, mins); + calendarMin.clear(Calendar.SECOND); + calendarMin.clear(Calendar.MILLISECOND); + + return calendarMin.getTime(); + } + + private int getTimeNumeral(String rollOverPeriod, String timeLiteral) { + return Integer.parseInt(rollOverPeriod.substring(0, rollOverPeriod.length() - (rollOverPeriod.length() - rollOverPeriod.indexOf(timeLiteral)))); + } + + private String getTimeLiteral(String rollOverPeriod) throws Exception { + final String ret; + + if (StringUtils.isEmpty(rollOverPeriod)) { + throw new Exception("empty rollover period"); + } else if (rollOverPeriod.endsWith(MINUTES)) { + ret = MINUTES; + } else if (rollOverPeriod.endsWith(HOURS)) { + ret = HOURS; + } else if (rollOverPeriod.endsWith(DAYS)) { + ret = DAYS; + } else if (rollOverPeriod.endsWith(WEEKS)) { + ret = WEEKS; + } else if (rollOverPeriod.endsWith(MONTHS)) { + ret = MONTHS; + } else if (rollOverPeriod.endsWith(YEARS)) { + 
ret = YEARS; + } else { + throw new Exception(rollOverPeriod + ": invalid rollover period"); + } + + return ret; + } } diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/SolrAppUtil.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/SolrAppUtil.java index 5cb8b1be1f..83612bde71 100644 --- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/SolrAppUtil.java +++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/SolrAppUtil.java @@ -28,12 +28,11 @@ import java.util.Collection; public class SolrAppUtil { + private SolrAppUtil() { + // to block instantiation + } + public static UpdateResponse addDocsToSolr(final SolrClient solrClient, final Collection docs) throws Exception { - return MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction() { - @Override - public UpdateResponse run() throws Exception { - return solrClient.add(docs); - } - }); + return MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> solrClient.add(docs)); } } diff --git a/agents-audit/src/test/java/org/apache/ranger/audit/utils/RangerJSONAuditWriterTest.java b/agents-audit/src/test/java/org/apache/ranger/audit/utils/RangerJSONAuditWriterTest.java index 3d65790d50..df1f48e893 100644 --- a/agents-audit/src/test/java/org/apache/ranger/audit/utils/RangerJSONAuditWriterTest.java +++ b/agents-audit/src/test/java/org/apache/ranger/audit/utils/RangerJSONAuditWriterTest.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -23,41 +23,42 @@ import org.junit.Test; import java.io.IOException; -import java.util.Map; +import java.io.PrintWriter; +import java.util.Collections; import java.util.HashMap; +import java.util.Map; import java.util.Properties; -import java.util.Collections; -import java.io.PrintWriter; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; -import static org.mockito.Mockito.spy; +import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; public class RangerJSONAuditWriterTest { - - public Properties props; + public Properties props; public Map auditConfigs; - public void setup(){ - props = new Properties(); - props.setProperty("test.dir", "/tmp"); + public void setup() { + props = new Properties(); auditConfigs = new HashMap<>(); + + props.setProperty("test.dir", "/tmp"); + auditConfigs.put(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS); } @Test public void checkReUseFlagInStreamErrors() throws Exception { - RangerJSONAuditWriter jsonAuditWriter = spy(new RangerJSONAuditWriter()); - PrintWriter out = mock(PrintWriter.class); + PrintWriter out = mock(PrintWriter.class); setup(); - jsonAuditWriter.init(props, "test", "localfs", auditConfigs); + jsonAuditWriter.init(props, "test", "localfs", auditConfigs); assertFalse(jsonAuditWriter.reUseLastLogFile); + when(jsonAuditWriter.getLogFileStream()).thenReturn(out); when(out.checkError()).thenReturn(true); assertFalse(jsonAuditWriter.logJSON(Collections.singleton("This event will not be logged!"))); @@ -74,49 +75,48 @@ public void checkAppendtoFileWhenExceptionsOccur() throws Exception { RangerJSONAuditWriter jsonAuditWriter 
= spy(new RangerJSONAuditWriter()); setup(); + jsonAuditWriter.init(props, "test", "localfs", auditConfigs); jsonAuditWriter.createFileSystemFolders(); + // File creation should fail with an exception which will trigger append next time. - when(jsonAuditWriter.fileSystem.create(jsonAuditWriter.auditPath)) - .thenThrow(new IOException("Creation not allowed!")); + when(jsonAuditWriter.fileSystem.create(jsonAuditWriter.auditPath)).thenThrow(new IOException("Creation not allowed!")); jsonAuditWriter.logJSON(Collections.singleton("This event will not be logged!")); jsonAuditWriter.fileSystem.deleteOnExit(jsonAuditWriter.auditPath); - assertTrue(jsonAuditWriter.reUseLastLogFile); assertNull(jsonAuditWriter.ostream); assertNull(jsonAuditWriter.logWriter); jsonAuditWriter.fileSystem = mock(FileSystem.class); - when(jsonAuditWriter.fileSystem - .hasPathCapability(jsonAuditWriter.auditPath, CommonPathCapabilities.FS_APPEND)).thenReturn(true); + when(jsonAuditWriter.fileSystem.hasPathCapability(jsonAuditWriter.auditPath, CommonPathCapabilities.FS_APPEND)).thenReturn(true); jsonAuditWriter.fileSystem.deleteOnExit(jsonAuditWriter.auditPath); // this will lead to an exception since append is called on mocks - jsonAuditWriter.logJSON(Collections.singleton( - "This event should be appended but won't be as appended we use mocks.")); + jsonAuditWriter.logJSON(Collections.singleton("This event should be appended but won't be as appended we use mocks.")); } - @Test public void checkFileRolloverAfterThreshold() throws Exception { RangerJSONAuditWriter jsonAuditWriter = spy(new RangerJSONAuditWriter()); setup(); + props.setProperty("test.file.rollover.enable.periodic.rollover", "true"); props.setProperty("test.file.rollover.periodic.rollover.check.sec", "2"); // rollover log file after this interval + jsonAuditWriter.fileRolloverSec = 5; // in seconds jsonAuditWriter.init(props, "test", "localfs", auditConfigs); - assertTrue(jsonAuditWriter.logJSON(Collections.singleton("First file 
created and added this line!"))); + jsonAuditWriter.fileSystem.deleteOnExit(jsonAuditWriter.auditPath); // cleanup Thread.sleep(6000); - assertFalse(jsonAuditWriter.reUseLastLogFile); assertNull(jsonAuditWriter.ostream); assertNull(jsonAuditWriter.logWriter); assertTrue(jsonAuditWriter.logJSON(Collections.singleton("Second file created since rollover happened!"))); + jsonAuditWriter.fileSystem.deleteOnExit(jsonAuditWriter.auditPath); // cleanup jsonAuditWriter.closeWriter(); } diff --git a/agents-common/dev-support/spotbugsIncludeFile.xml b/agents-common/dev-support/spotbugsIncludeFile.xml deleted file mode 100644 index 9a0a9261a9..0000000000 --- a/agents-common/dev-support/spotbugsIncludeFile.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/agents-common/src/main/java/org/apache/ranger/plugin/util/PasswordUtils.java b/agents-common/src/main/java/org/apache/ranger/plugin/util/PasswordUtils.java index 546412b530..6cd4ee0441 100644 --- a/agents-common/src/main/java/org/apache/ranger/plugin/util/PasswordUtils.java +++ b/agents-common/src/main/java/org/apache/ranger/plugin/util/PasswordUtils.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.security.NoSuchAlgorithmException; import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import javax.crypto.Cipher; @@ -229,4 +231,120 @@ public static String getDecryptPassword(String password) { } return decryptedPwd; } + + /* Password Generator */ + public static final class PasswordGenerator { + private static final String LOWER = "abcdefghijklmnopqrstuvwxyz"; + private static final String UPPER = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + private static final String DIGITS = "0123456789"; + private static final String SYMBOLS = "!@#$%&*()_+-=[]|,./?><"; + private final boolean useLower; + private final boolean useUpper; + private final boolean useDigits; + private final boolean useSymbols; + + 
private PasswordGenerator(PasswordGeneratorBuilder builder) { + this.useLower = builder.useLower; + this.useUpper = builder.useUpper; + this.useDigits = builder.useDigits; + this.useSymbols = builder.useSymbols; + } + + public static class PasswordGeneratorBuilder { + private boolean useLower; + private boolean useUpper; + private boolean useDigits; + private boolean useSymbols; + + public PasswordGeneratorBuilder() { + this.useLower = false; + this.useUpper = false; + this.useDigits = false; + this.useSymbols = false; + } + + /** + * @param useLower true in case you would like to include lowercase + * characters (abc...xyz). Default false. + * @return the builder for chaining. + */ + public PasswordGeneratorBuilder useLower(boolean useLower) { + this.useLower = useLower; + return this; + } + + /** + * @param useUpper true in case you would like to include uppercase + * characters (ABC...XYZ). Default false. + * @return the builder for chaining. + */ + public PasswordGeneratorBuilder useUpper(boolean useUpper) { + this.useUpper = useUpper; + return this; + } + + /** + * @param useDigits true in case you would like to include digit + * characters (123...). Default false. + * @return the builder for chaining. + */ + public PasswordGeneratorBuilder useDigits(boolean useDigits) { + this.useDigits = useDigits; + return this; + } + + /** + * @param useSymbols true in case you would like to include + * punctuation characters (!@#...). Default false. + * @return the builder for chaining. + */ + public PasswordGeneratorBuilder useSymbols(boolean useSymbols) { + this.useSymbols = useSymbols; + return this; + } + + /** + * Get an object to use. + * + * @return the {@link PasswordGenerator} + * object. + */ + public PasswordGenerator build() { + return new PasswordGenerator(this); + } + } + + /** + * @param length the length of the password you would like to generate. 
+ * @return a password that uses the categories you define when constructing + * the object with a probability. + */ + public String generate(int length) { + StringBuilder password = new StringBuilder(length); + SecureRandom secureRandom = new SecureRandom(); + + List charCategories = new ArrayList<>(4); + if (useLower) { + charCategories.add(LOWER); + } + if (useUpper) { + charCategories.add(UPPER); + } + if (useDigits) { + charCategories.add(DIGITS); + } + if (useSymbols) { + charCategories.add(SYMBOLS); + } + + // Build the password. + for (int i = 0; i < length; i++) { + int idxCatagory = (i < charCategories.size()) ? i : secureRandom.nextInt(charCategories.size()); + String charCategory = charCategories.get(idxCatagory); + int position = secureRandom.nextInt(charCategory.length()); + password.append(charCategory.charAt(position)); + } + return new String(password); + } + } } diff --git a/dev-support/checkstyle-suppressions.xml b/dev-support/checkstyle-suppressions.xml index 8fa9081650..75a5535113 100644 --- a/dev-support/checkstyle-suppressions.xml +++ b/dev-support/checkstyle-suppressions.xml @@ -22,4 +22,8 @@ + + + + diff --git a/intg/pom.xml b/intg/pom.xml index 9a551e1b54..24204c23af 100644 --- a/intg/pom.xml +++ b/intg/pom.xml @@ -25,6 +25,10 @@ ranger-intg + + true + false + org.apache.ranger @@ -147,7 +151,6 @@ - diff --git a/intg/src/main/java/org/apache/ranger/RangerClient.java b/intg/src/main/java/org/apache/ranger/RangerClient.java index a61c13fd21..5b9849761f 100644 --- a/intg/src/main/java/org/apache/ranger/RangerClient.java +++ b/intg/src/main/java/org/apache/ranger/RangerClient.java @@ -19,45 +19,58 @@ package org.apache.ranger; import com.fasterxml.jackson.core.type.TypeReference; +import com.sun.jersey.api.client.ClientResponse; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.ranger.admin.client.datatype.RESTResponse; import org.apache.ranger.audit.provider.MiscUtil; 
import org.apache.ranger.authorization.hadoop.config.RangerPluginConfig; +import org.apache.ranger.plugin.model.RangerPluginInfo; +import org.apache.ranger.plugin.model.RangerPolicy; +import org.apache.ranger.plugin.model.RangerRole; +import org.apache.ranger.plugin.model.RangerSecurityZone; +import org.apache.ranger.plugin.model.RangerSecurityZoneHeaderInfo; +import org.apache.ranger.plugin.model.RangerService; +import org.apache.ranger.plugin.model.RangerServiceDef; +import org.apache.ranger.plugin.model.RangerServiceHeaderInfo; +import org.apache.ranger.plugin.model.RangerServiceTags; +import org.apache.ranger.plugin.util.GrantRevokeRoleRequest; import org.apache.ranger.plugin.util.JsonUtilsV2; import org.apache.ranger.plugin.util.RangerPurgeResult; +import org.apache.ranger.plugin.util.RangerRESTClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.sun.jersey.api.client.ClientResponse; -import org.apache.hadoop.conf.Configuration; -import org.apache.ranger.plugin.model.*; -import org.apache.ranger.admin.client.datatype.RESTResponse; -import org.apache.ranger.plugin.util.GrantRevokeRoleRequest; -import org.apache.ranger.plugin.util.RangerRESTClient; -import java.security.PrivilegedExceptionAction; import javax.ws.rs.HttpMethod; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import java.net.URI; -import java.util.*; +import java.net.URI; +import java.security.PrivilegedExceptionAction; +import java.util.Collections; +import java.util.HashMap; +import java.util.IllegalFormatException; +import java.util.List; +import java.util.Map; +import java.util.Set; public class RangerClient { - private static final Logger LOG = LoggerFactory.getLogger(RangerClient.class); - private static final String AUTH_KERBEROS = "kerberos"; + private static final Logger LOG = LoggerFactory.getLogger(RangerClient.class); + private static final String AUTH_KERBEROS = "kerberos"; // QueryParams - private static final String PARAM_DAYS = "days"; 
- private static final String PARAM_EXEC_USER = "execUser"; - private static final String PARAM_POLICY_NAME = "policyname"; - private static final String PARAM_SERVICE_NAME = "serviceName"; - private static final String PARAM_ZONE_NAME = "zoneName"; - private static final String PARAM_PURGE_RECORD_TYPE = "type"; - private static final String PARAM_PURGE_RETENTION_DAYS = "retentionDays"; + private static final String PARAM_DAYS = "days"; + private static final String PARAM_EXEC_USER = "execUser"; + private static final String PARAM_POLICY_NAME = "policyname"; + private static final String PARAM_SERVICE_NAME = "serviceName"; + private static final String PARAM_ZONE_NAME = "zoneName"; + private static final String PARAM_PURGE_RECORD_TYPE = "type"; + private static final String PARAM_PURGE_RETENTION_DAYS = "retentionDays"; private static final String PARAM_RELOAD_SERVICE_POLICIES_CACHE = "reloadServicePoliciesCache"; // URIs - private static final String URI_BASE = "/service/public/v2/api"; + private static final String URI_BASE = "/service/public/v2/api"; private static final String URI_SERVICEDEF = URI_BASE + "/servicedef"; private static final String URI_SERVICEDEF_BY_ID = URI_SERVICEDEF + "/%d"; @@ -93,7 +106,6 @@ public class RangerClient { private static final String URI_POLICY_DELTAS = URI_BASE + "/server/policydeltas"; private static final String URI_PURGE_RECORDS = URI_BASE + "/server/purge/records"; - // APIs public static final API CREATE_SERVICEDEF = new API(URI_SERVICEDEF, HttpMethod.POST, Response.Status.OK); public static final API UPDATE_SERVICEDEF_BY_ID = new API(URI_SERVICEDEF_BY_ID, HttpMethod.PUT, Response.Status.OK); @@ -153,7 +165,6 @@ public class RangerClient { public static final API DELETE_POLICY_DELTAS = new API(URI_POLICY_DELTAS, HttpMethod.DELETE, Response.Status.NO_CONTENT); public static final API PURGE_RECORDS = new API(URI_PURGE_RECORDS, HttpMethod.DELETE, Response.Status.OK); - private static final TypeReference TYPE_VOID = new 
TypeReference() {}; private static final TypeReference> TYPE_SET_STRING = new TypeReference>() {}; private static final TypeReference> TYPE_LIST_STRING = new TypeReference>() {}; @@ -168,31 +179,19 @@ public class RangerClient { private static final TypeReference> TYPE_LIST_SVC_HEADER_INFO = new TypeReference>() {}; private final RangerRESTClient restClient; - private boolean isSecureMode = false; - - private void authInit(String authType, String username, String password) { - if (AUTH_KERBEROS.equalsIgnoreCase(authType)) { - isSecureMode = true; - MiscUtil.loginWithKeyTab(password, username, null); - UserGroupInformation ugi = MiscUtil.getUGILoginUser(); - LOG.info("RangerClient.authInit() UGI user: " + ugi.getUserName() + " principal: " + username); - } else { - restClient.setBasicAuthInfo(username, password); - } - } + private boolean isSecureMode; public RangerClient(String hostName, String authType, String username, String password, String configFile) { restClient = new RangerRESTClient(hostName, configFile, new Configuration()); authInit(authType, username, password); } - public RangerClient(String hostname, String authType, String username, String password, String appId, String serviceType){ + public RangerClient(String hostname, String authType, String username, String password, String appId, String serviceType) { this(hostname, authType, username, password, - new RangerPluginConfig(serviceType, null,appId,null,null,null) + new RangerPluginConfig(serviceType, null, appId, null, null, null) .get("ranger.plugin." 
+ serviceType + ".policy.rest.ssl.config.file")); } - public RangerClient(RangerRESTClient restClient) { this.restClient = restClient; } @@ -232,7 +231,6 @@ public List findServiceDefs(Map filter) throws return callAPI(FIND_SERVICEDEFS, filter, null, TYPE_LIST_SERVICE_DEF); } - /* * Service APIs */ @@ -268,7 +266,6 @@ public List findServices(Map filter) throws Range return callAPI(FIND_SERVICES, filter, null, TYPE_LIST_SERVICE); } - /* * Policy APIs */ @@ -285,7 +282,7 @@ public RangerPolicy updatePolicy(String serviceName, String policyName, RangerPo } public RangerPolicy updatePolicyByNameAndZone(String serviceName, String policyName, String zoneName, RangerPolicy policy) throws RangerServiceException { - Map queryParams = new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_ZONE_NAME, zoneName); @@ -301,7 +298,7 @@ public void deletePolicy(long policyId) throws RangerServiceException { } public void deletePolicy(String serviceName, String policyName) throws RangerServiceException { - Map queryParams = new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_POLICY_NAME, policyName); queryParams.put("servicename", serviceName); @@ -309,9 +306,8 @@ public void deletePolicy(String serviceName, String policyName) throws RangerSer callAPI(DELETE_POLICY_BY_NAME, queryParams); } - public void deletePolicyByNameAndZone(String serviceName, String policyName, String zoneName) throws RangerServiceException { - Map queryParams = new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_POLICY_NAME, policyName); queryParams.put(PARAM_SERVICE_NAME, serviceName); @@ -329,12 +325,13 @@ public RangerPolicy getPolicy(String serviceName, String policyName) throws Rang } public RangerPolicy getPolicyByNameAndZone(String serviceName, String policyName, String zoneName) throws RangerServiceException { - Map queryParams = new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_ZONE_NAME, zoneName); return 
callAPI(GET_POLICY_BY_NAME.applyUrlFormat(serviceName, policyName), queryParams, null, RangerPolicy.class); } + public List getPoliciesInService(String serviceName) throws RangerServiceException { return callAPI(GET_POLICIES_IN_SERVICE.applyUrlFormat(serviceName), null, null, TYPE_LIST_POLICY); } @@ -343,7 +340,6 @@ public List findPolicies(Map filter) throws Ranger return callAPI(FIND_POLICIES, filter, null, TYPE_LIST_POLICY); } - /* * SecurityZone APIs */ @@ -403,7 +399,7 @@ public void deleteRole(long roleId) throws RangerServiceException { } public void deleteRole(String roleName, String execUser, String serviceName) throws RangerServiceException { - Map queryParams = new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_EXEC_USER, execUser); queryParams.put(PARAM_SERVICE_NAME, serviceName); @@ -416,7 +412,7 @@ public RangerRole getRole(long roleId) throws RangerServiceException { } public RangerRole getRole(String roleName, String execUser, String serviceName) throws RangerServiceException { - Map queryParams = new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_EXEC_USER, execUser); queryParams.put(PARAM_SERVICE_NAME, serviceName); @@ -425,7 +421,7 @@ public RangerRole getRole(String roleName, String execUser, String serviceName) } public List getAllRoleNames(String execUser, String serviceName) throws RangerServiceException { - Map queryParams = new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_EXEC_USER, execUser); queryParams.put(PARAM_SERVICE_NAME, serviceName); @@ -449,7 +445,6 @@ public RESTResponse revokeRole(String serviceName, GrantRevokeRoleRequest reques return callAPI(REVOKE_ROLE.applyUrlFormat(serviceName), null, request, RESTResponse.class); } - /* * Admin APIs */ @@ -466,7 +461,7 @@ public List getPluginsInfo() throws RangerServiceException { } public void deletePolicyDeltas(int days, boolean reloadServicePoliciesCache) throws RangerServiceException { - Map queryParams = 
new HashMap<>(); + Map queryParams = new HashMap<>(); queryParams.put(PARAM_DAYS, String.valueOf(days)); queryParams.put(PARAM_RELOAD_SERVICE_POLICIES_CACHE, String.valueOf(reloadServicePoliciesCache)); @@ -483,6 +478,17 @@ public List purgeRecords(String recordType, int retentionDays return callAPI(PURGE_RECORDS, queryParams, null, TYPE_LIST_PURGE_RESULT); } + private void authInit(String authType, String username, String password) { + if (AUTH_KERBEROS.equalsIgnoreCase(authType)) { + isSecureMode = true; + MiscUtil.loginWithKeyTab(password, username, null); + UserGroupInformation ugi = MiscUtil.getUGILoginUser(); + LOG.info("RangerClient.authInit() UGI user: " + ugi.getUserName() + " principal: " + username); + } else { + restClient.setBasicAuthInfo(username, password); + } + } + private ClientResponse invokeREST(API api, Map params, Object request) throws RangerServiceException { final ClientResponse clientResponse; try { @@ -517,7 +523,7 @@ private ClientResponse invokeREST(API api, Map params, Object re private ClientResponse responseHandler(API api, Map params, Object request) throws RangerServiceException { final ClientResponse clientResponse; - if (LOG.isDebugEnabled()){ + if (LOG.isDebugEnabled()) { LOG.debug("Call : {} {}", api.getMethod(), api.getNormalizedPath()); LOG.debug("Content-type : {} ", api.getConsumes()); LOG.debug("Accept : {} ", api.getProduces()); @@ -530,7 +536,7 @@ private ClientResponse responseHandler(API api, Map params, Obje try { clientResponse = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction) () -> { try { - return invokeREST(api,params,request); + return invokeREST(api, params, request); } catch (RangerServiceException e) { LOG.error(e.getMessage()); } @@ -540,7 +546,7 @@ private ClientResponse responseHandler(API api, Map params, Obje throw new RangerServiceException(excp); } } else { - clientResponse = invokeREST(api,params,request); + clientResponse = invokeREST(api, params, request); } if (LOG.isDebugEnabled()) { 
@@ -561,7 +567,7 @@ private ClientResponse responseHandler(API api, Map params, Obje private void callAPI(API api, Map params) throws RangerServiceException { if (LOG.isDebugEnabled()) { - LOG.debug("==> callAPI({},{})",api, params); + LOG.debug("==> callAPI({},{})", api, params); } responseHandler(api, params, null); @@ -574,7 +580,7 @@ private void callAPI(API api, Map params) throws RangerServiceEx private T callAPI(API api, Map params, Object request, TypeReference responseType) throws RangerServiceException { T ret = null; if (LOG.isDebugEnabled()) { - LOG.debug("==> callAPI({},{},{})",api, params, request); + LOG.debug("==> callAPI({},{},{})", api, params, request); LOG.debug("------------------------------------------------------"); } final ClientResponse clientResponse = responseHandler(api, params, request); @@ -597,7 +603,7 @@ private T callAPI(API api, Map params, Object request, TypeR private T callAPI(API api, Map params, Object request, Class responseType) throws RangerServiceException { T ret = null; if (LOG.isDebugEnabled()) { - LOG.debug("==> callAPI({},{},{})",api, params, request); + LOG.debug("==> callAPI({},{},{})", api, params, request); LOG.debug("------------------------------------------------------"); } final ClientResponse clientResponse = responseHandler(api, params, request); @@ -626,7 +632,6 @@ public static class API { private final String consumes; private final String produces; - public API(String path, String method, Response.Status expectedStatus) { this(path, method, expectedStatus, MediaType.APPLICATION_JSON, MediaType.APPLICATION_JSON); } @@ -682,9 +687,9 @@ public String getNormalizedPath() { } public API applyUrlFormat(Object... 
params) throws RangerServiceException { - try{ + try { return new API(String.format(path, params), method, expectedStatus, consumes, produces); - } catch(IllegalFormatException e) { + } catch (IllegalFormatException e) { LOG.error("Arguments not formatted properly"); throw new RangerServiceException(e); diff --git a/intg/src/main/java/org/apache/ranger/RangerServiceException.java b/intg/src/main/java/org/apache/ranger/RangerServiceException.java index 9b909a4bb3..9388c6cca5 100644 --- a/intg/src/main/java/org/apache/ranger/RangerServiceException.java +++ b/intg/src/main/java/org/apache/ranger/RangerServiceException.java @@ -40,5 +40,7 @@ private RangerServiceException(RangerClient.API api, ClientResponse.Status statu this.status = status; } - public ClientResponse.Status getStatus() { return status; } + public ClientResponse.Status getStatus() { + return status; + } } diff --git a/intg/src/test/java/org/apache/ranger/TestRangerClient.java b/intg/src/test/java/org/apache/ranger/TestRangerClient.java index 5120aceae4..be52886df5 100644 --- a/intg/src/test/java/org/apache/ranger/TestRangerClient.java +++ b/intg/src/test/java/org/apache/ranger/TestRangerClient.java @@ -45,14 +45,13 @@ import java.util.Set; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.anyString; @ExtendWith(MockitoExtension.class) public class TestRangerClient { - private static final RangerClient.API GET_TEST_API = new RangerClient.API("/relative/path/test", HttpMethod.GET, Response.Status.OK); - + private static final RangerClient.API GET_TEST_API = new RangerClient.API("/relative/path/test", HttpMethod.GET, Response.Status.OK); @BeforeMethod public void setup() { @@ -75,7 +74,7 @@ public void apiGet_Success() throws Exception { Assertions.assertNotNull(ret); Assertions.assertEquals(ret.getName(), service.getName()); - } 
catch(RangerServiceException excp){ + } catch (RangerServiceException excp) { Assertions.fail("Not expected to fail! Found exception: " + excp); } } @@ -93,8 +92,8 @@ public void apiGet_ServiceUnavailable() throws Exception { RangerService ret = client.getService(1L); Assertions.fail("Expected to fail with SERVICE_UNAVAILABLE"); - } catch(RangerServiceException excp){ - Assertions.assertEquals(ClientResponse.Status.SERVICE_UNAVAILABLE, excp.getStatus(), "Expected to fail with status SERVICE_UNAVAILABLE"); + } catch (RangerServiceException excp) { + Assertions.assertEquals(ClientResponse.Status.SERVICE_UNAVAILABLE, excp.getStatus(), "Expected to fail with status SERVICE_UNAVAILABLE"); } } @@ -111,7 +110,7 @@ public void apiGet_FailWithUnexpectedStatusCode() throws Exception { client.getService(1L); Assertions.fail("supposed to fail with RangerServiceException"); - } catch(RangerServiceException excp) { + } catch (RangerServiceException excp) { Assertions.assertTrue(excp.getMessage().contains("statusCode=" + ClientResponse.Status.INTERNAL_SERVER_ERROR.getStatusCode())); Assertions.assertTrue(excp.getMessage().contains("status=" + ClientResponse.Status.INTERNAL_SERVER_ERROR.getReasonPhrase())); } @@ -128,7 +127,7 @@ public void apiGet_FailWithNullResponse() throws Exception { client.getService(1L); Assertions.fail("supposed to fail with RangerServiceException"); - } catch(RangerServiceException excp) { + } catch (RangerServiceException excp) { Assertions.assertTrue(excp.getMessage().contains("statusCode=null")); Assertions.assertTrue(excp.getMessage().contains("status=null")); } @@ -137,9 +136,9 @@ public void apiGet_FailWithNullResponse() throws Exception { @Test public void api_UrlMissingFormat() { try { - new RangerClient.API("%dtest%dpath%d", HttpMethod.GET, Response.Status.OK).applyUrlFormat(1,1); + new RangerClient.API("%dtest%dpath%d", HttpMethod.GET, Response.Status.OK).applyUrlFormat(1, 1); Assertions.fail("supposed to fail with RangerServiceException"); - } 
catch(RangerServiceException exp){ + } catch (RangerServiceException exp) { Assertions.assertTrue(exp.getMessage().contains("MissingFormatArgumentException")); } } @@ -149,23 +148,23 @@ public void api_UrlIllegalFormatConversion() { try { new RangerClient.API("testpath%d", HttpMethod.GET, Response.Status.OK).applyUrlFormat("1"); Assertions.fail("supposed to fail with RangerServiceException"); - } catch(RangerServiceException exp){ + } catch (RangerServiceException exp) { Assertions.assertTrue(exp.getMessage().contains("IllegalFormatConversionException")); } try { new RangerClient.API("testpath%f", HttpMethod.GET, Response.Status.OK).applyUrlFormat(1); Assertions.fail("supposed to fail with RangerServiceException"); - } catch(RangerServiceException exp){ + } catch (RangerServiceException exp) { Assertions.assertTrue(exp.getMessage().contains("IllegalFormatConversionException")); } } @Test public void testGetSecurityZoneHeaders() throws Exception { - RangerRESTClient restClient = mock(RangerRESTClient.class); - ClientResponse response = mock(ClientResponse.class); - RangerClient client = new RangerClient(restClient); + RangerRESTClient restClient = mock(RangerRESTClient.class); + ClientResponse response = mock(ClientResponse.class); + RangerClient client = new RangerClient(restClient); List expected = new ArrayList<>(); @@ -188,9 +187,9 @@ public void testGetSecurityZoneHeaders() throws Exception { @Test public void testGetSecurityZoneServiceHeaders() throws Exception { - RangerRESTClient restClient = mock(RangerRESTClient.class); - ClientResponse response = mock(ClientResponse.class); - RangerClient client = new RangerClient(restClient); + RangerRESTClient restClient = mock(RangerRESTClient.class); + ClientResponse response = mock(ClientResponse.class); + RangerClient client = new RangerClient(restClient); List expected = new ArrayList<>(); @@ -215,12 +214,12 @@ public void testGetSecurityZoneServiceHeaders() throws Exception { @Test public void 
testGetSecurityZoneNamesForResource() throws RangerServiceException { - RangerClient client = Mockito.mock(RangerClient.class); - String serviceName = "dev_hive"; - Map resource = new HashMap() {{ - put("database", "testdb"); - put("table", "testtbl1"); - }}; + RangerClient client = Mockito.mock(RangerClient.class); + String serviceName = "dev_hive"; + Map resource = new HashMap() {{ + put("database", "testdb"); + put("table", "testtbl1"); + }}; when(client.getSecurityZoneNamesForResource(serviceName, resource)).thenReturn(Collections.emptySet()); @@ -253,4 +252,4 @@ public void testPurgeRecords() throws RangerServiceException { Assertions.assertEquals(Collections.emptyList(), purgeResults); } -} \ No newline at end of file +} diff --git a/kms/dev-support/findbugsExcludeFile.xml b/kms/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 929936dad3..0000000000 --- a/kms/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - diff --git a/plugin-kudu/pom.xml b/plugin-kudu/pom.xml index d68042a59f..9576215d55 100644 --- a/plugin-kudu/pom.xml +++ b/plugin-kudu/pom.xml @@ -28,6 +28,8 @@ Kudu Security Plugin Kudu Security Plugin + true + false UTF-8 diff --git a/plugin-kudu/src/main/java/org/apache/ranger/services/kudu/RangerServiceKudu.java b/plugin-kudu/src/main/java/org/apache/ranger/services/kudu/RangerServiceKudu.java index a2b66de33c..e3c01df6b9 100644 --- a/plugin-kudu/src/main/java/org/apache/ranger/services/kudu/RangerServiceKudu.java +++ b/plugin-kudu/src/main/java/org/apache/ranger/services/kudu/RangerServiceKudu.java @@ -29,17 +29,15 @@ * RangerService for Apache Kudu. */ public class RangerServiceKudu extends RangerBaseService { - @Override public HashMap validateConfig() throws Exception { - // TODO: implement configure validation for Kudu policies. - return new HashMap<>(); + // TODO: implement configure validation for Kudu policies. 
+ return new HashMap<>(); } @Override public List lookupResource(ResourceLookupContext context) throws Exception { - // TODO: implement resource lookup for Kudu policies. - return new ArrayList<>(); + // TODO: implement resource lookup for Kudu policies. + return new ArrayList<>(); } - } diff --git a/plugin-nifi-registry/pom.xml b/plugin-nifi-registry/pom.xml index 51baca0574..d62ee2fe4b 100644 --- a/plugin-nifi-registry/pom.xml +++ b/plugin-nifi-registry/pom.xml @@ -28,6 +28,8 @@ NiFi Registry Security Plugin NiFi Registry Security Plugin + true + false UTF-8 diff --git a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/RangerServiceNiFiRegistry.java b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/RangerServiceNiFiRegistry.java index bbf0593064..100288c55e 100644 --- a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/RangerServiceNiFiRegistry.java +++ b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/RangerServiceNiFiRegistry.java @@ -24,6 +24,7 @@ import org.apache.ranger.services.nifi.registry.client.NiFiRegistryConnectionMgr; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import java.util.HashMap; import java.util.List; @@ -31,32 +32,27 @@ * RangerService for Apache NiFi Registry. 
*/ public class RangerServiceNiFiRegistry extends RangerBaseService { - private static final Logger LOG = LoggerFactory.getLogger(RangerServiceNiFiRegistry.class); @Override public HashMap validateConfig() throws Exception { HashMap ret; - String serviceName = getServiceName(); + String serviceName = getServiceName(); - if (LOG.isDebugEnabled()) { - LOG.debug("==> RangerServiceNiFiRegistry.validateConfig Service: (" + serviceName + " )"); - } + LOG.debug("==> RangerServiceNiFiRegistry.validateConfig Service: ({})", serviceName); if (configs != null) { try { ret = NiFiRegistryConnectionMgr.connectionTest(serviceName, configs); } catch (Exception e) { - LOG.error("<== RangerServiceNiFiRegistry.validateConfig Error:", e); + LOG.error("<== RangerServiceNiFiRegistry.validateConfig Error: ", e); throw e; } } else { throw new IllegalStateException("No Configuration found"); } - if (LOG.isDebugEnabled()) { - LOG.debug("<== RangerServiceNiFiRegistry.validateConfig Response : (" + ret + " )"); - } + LOG.debug("<== RangerServiceNiFiRegistry.validateConfig Response : ({})", ret); return ret; } @@ -66,5 +62,4 @@ public List lookupResource(ResourceLookupContext context) throws Excepti final NiFiRegistryClient client = NiFiRegistryConnectionMgr.getNiFiRegistryClient(serviceName, configs); return client.getResources(context); } - } diff --git a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryAuthType.java b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryAuthType.java index 2956afea6f..4da191f306 100644 --- a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryAuthType.java +++ b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryAuthType.java @@ -22,8 +22,6 @@ * Possible authentication types for NiFi Registry. 
*/ public enum NiFiRegistryAuthType { - NONE, SSL - } diff --git a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryClient.java b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryClient.java index f1cab4a88f..acfd451d09 100644 --- a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryClient.java +++ b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryClient.java @@ -18,6 +18,8 @@ */ package org.apache.ranger.services.nifi.registry.client; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; @@ -28,8 +30,6 @@ import org.apache.commons.lang.StringUtils; import org.apache.ranger.plugin.client.BaseClient; import org.apache.ranger.plugin.service.ResourceLookupContext; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +38,7 @@ import javax.net.ssl.SSLPeerUnverifiedException; import javax.net.ssl.SSLSession; import javax.ws.rs.core.Response; + import java.security.cert.Certificate; import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; @@ -50,64 +51,59 @@ * Client to communicate with NiFi Registry and retrieve available resources. */ public class NiFiRegistryClient { - private static final Logger LOG = LoggerFactory.getLogger(NiFiRegistryClient.class); - static final String SUCCESS_MSG = "ConnectionTest Successful"; static final String FAILURE_MSG = "Unable to retrieve any resources using given parameters. 
"; - - private final String url; - private final SSLContext sslContext; + private final String url; + private final SSLContext sslContext; private final HostnameVerifier hostnameVerifier; - private final ObjectMapper mapper = new ObjectMapper(); + private final ObjectMapper mapper = new ObjectMapper(); public NiFiRegistryClient(final String url, final SSLContext sslContext) { - this.url = url; - this.sslContext = sslContext; + this.url = url; + this.sslContext = sslContext; this.hostnameVerifier = new NiFiRegistryHostnameVerifier(); } public HashMap connectionTest() { - String errMsg = ""; - boolean connectivityStatus; + String errMsg = ""; + boolean connectivityStatus; HashMap responseData = new HashMap<>(); try { - final WebResource resource = getWebResource(); + final WebResource resource = getWebResource(); final ClientResponse response = getResponse(resource, "application/json"); - if (LOG.isDebugEnabled()) { - LOG.debug("Got response from NiFi with status code " + response.getStatus()); - } + LOG.debug("Got response from NiFi with status code {}", response.getStatus()); if (Response.Status.OK.getStatusCode() == response.getStatus()) { connectivityStatus = true; } else { connectivityStatus = false; - errMsg = "Status Code = " + response.getStatus(); + errMsg = "Status Code = " + response.getStatus(); } - } catch (Exception e) { - LOG.error("Connection to NiFi failed due to " + e.getMessage(), e); + LOG.error("Connection to NiFi failed due to {}", e.getMessage(), e); connectivityStatus = false; - errMsg = e.getMessage(); + errMsg = e.getMessage(); } if (connectivityStatus) { BaseClient.generateResponseDataMap(connectivityStatus, SUCCESS_MSG, SUCCESS_MSG, null, null, responseData); } else { - BaseClient.generateResponseDataMap(connectivityStatus, FAILURE_MSG, FAILURE_MSG + errMsg, null, null, responseData); + String errorMsg = FAILURE_MSG + errMsg; + BaseClient.generateResponseDataMap(connectivityStatus, FAILURE_MSG, errorMsg, null, null, responseData); } if 
(LOG.isDebugEnabled()) { - LOG.debug("Response Data - " + responseData); + LOG.debug("Response Data - {}", responseData); } return responseData; } public List getResources(ResourceLookupContext context) throws Exception { - final WebResource resource = getWebResource(); + final WebResource resource = getWebResource(); final ClientResponse response = getResponse(resource, "application/json"); if (Response.Status.OK.getStatusCode() != response.getStatus()) { @@ -138,6 +134,18 @@ public List getResources(ResourceLookupContext context) throws Exception } } + public String getUrl() { + return url; + } + + public SSLContext getSslContext() { + return sslContext; + } + + public HostnameVerifier getHostnameVerifier() { + return hostnameVerifier; + } + protected WebResource getWebResource() { final ClientConfig config = new DefaultClientConfig(); if (sslContext != null) { @@ -153,65 +161,49 @@ protected ClientResponse getResponse(WebResource resource, String accept) { return resource.accept(accept).get(ClientResponse.class); } - public String getUrl() { - return url; - } - - public SSLContext getSslContext() { - return sslContext; - } - - public HostnameVerifier getHostnameVerifier() { - return hostnameVerifier; - } - /** * Custom hostname verifier that checks subject alternative names against the hostname of the URI. 
*/ private static class NiFiRegistryHostnameVerifier implements HostnameVerifier { - @Override public boolean verify(final String hostname, final SSLSession ssls) { try { for (final Certificate peerCertificate : ssls.getPeerCertificates()) { if (peerCertificate instanceof X509Certificate) { - final X509Certificate x509Cert = (X509Certificate) peerCertificate; - final List subjectAltNames = getSubjectAlternativeNames(x509Cert); + final X509Certificate x509Cert = (X509Certificate) peerCertificate; + final List subjectAltNames = getSubjectAlternativeNames(x509Cert); if (subjectAltNames.contains(hostname.toLowerCase())) { return true; } } } } catch (final SSLPeerUnverifiedException | CertificateParsingException ex) { - LOG.warn("Hostname Verification encountered exception verifying hostname due to: " + ex, ex); + LOG.warn("Hostname Verification encountered exception verifying hostname due to: {}", ex, ex); } return false; } private List getSubjectAlternativeNames(final X509Certificate certificate) throws CertificateParsingException { - final List result = new ArrayList<>(); + final List result = new ArrayList<>(); final Collection> altNames = certificate.getSubjectAlternativeNames(); if (altNames == null) { return result; } - for (final List generalName : altNames) { + for (final List generalName : altNames) { /** * generalName has the name type as the first element a String or byte array for the second element. We return any general names that are String types. - * * We don't inspect the numeric name type because some certificates incorrectly put IPs and DNS names under the wrong name types. 
*/ - if (generalName.size() > 1) { - final Object value = generalName.get(1); - if (value instanceof String) { - result.add(((String) value).toLowerCase()); - } - } - + if (generalName.size() > 1) { + final Object value = generalName.get(1); + if (value instanceof String) { + result.add(((String) value).toLowerCase()); + } + } } return result; } } - } diff --git a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConfigs.java b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConfigs.java index 248d0613f7..024fce8774 100644 --- a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConfigs.java +++ b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConfigs.java @@ -22,18 +22,13 @@ * Config property names from the NiFi Registry service definition. */ public interface NiFiRegistryConfigs { - - String NIFI_REG_URL = "nifi.registry.url"; - String NIFI_REG_AUTHENTICATION_TYPE = "nifi.registry.authentication"; - - String NIFI_REG_SSL_KEYSTORE = "nifi.registry.ssl.keystore"; - String NIFI_REG_SSL_KEYSTORE_TYPE = "nifi.registry.ssl.keystoreType"; - String NIFI_REG_SSL_KEYSTORE_PASSWORD = "nifi.registry.ssl.keystorePassword"; - - String NIFI_REG_SSL_TRUSTSTORE = "nifi.registry.ssl.truststore"; - String NIFI_REG_SSL_TRUSTSTORE_TYPE = "nifi.registry.ssl.truststoreType"; - String NIFI_REG_SSL_TRUSTSTORE_PASSWORD = "nifi.registry.ssl.truststorePassword"; - + String NIFI_REG_URL = "nifi.registry.url"; + String NIFI_REG_AUTHENTICATION_TYPE = "nifi.registry.authentication"; + String NIFI_REG_SSL_KEYSTORE = "nifi.registry.ssl.keystore"; + String NIFI_REG_SSL_KEYSTORE_TYPE = "nifi.registry.ssl.keystoreType"; + String NIFI_REG_SSL_KEYSTORE_PASSWORD = "nifi.registry.ssl.keystorePassword"; + String NIFI_REG_SSL_TRUSTSTORE = "nifi.registry.ssl.truststore"; + String NIFI_REG_SSL_TRUSTSTORE_TYPE = 
"nifi.registry.ssl.truststoreType"; + String NIFI_REG_SSL_TRUSTSTORE_PASSWORD = "nifi.registry.ssl.truststorePassword"; String NIFI_REG_SSL_USER_DEFAULT_CONTEXT = "nifi.registry.ssl.use.default.context"; - } diff --git a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConnectionMgr.java b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConnectionMgr.java index 938504245b..70e6e8ff35 100644 --- a/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConnectionMgr.java +++ b/plugin-nifi-registry/src/main/java/org/apache/ranger/services/nifi/registry/client/NiFiRegistryConnectionMgr.java @@ -18,7 +18,6 @@ */ package org.apache.ranger.services.nifi.registry.client; - import org.apache.commons.lang.StringUtils; import org.apache.ranger.plugin.client.BaseClient; import org.slf4j.Logger; @@ -27,6 +26,7 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManagerFactory; + import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; @@ -46,16 +46,17 @@ * Creates a NiFiRegistryClient and provides method to test a connection to NiFi Registry. 
*/ public class NiFiRegistryConnectionMgr { - - private static final Logger LOG = LoggerFactory.getLogger(NiFiRegistryConnectionMgr.class); + private static final Logger LOG = LoggerFactory.getLogger(NiFiRegistryConnectionMgr.class); private static final String SSL_ALGORITHM = "TLSv1.2"; private static final String API_RESOURCES_PATH = "/nifi-registry-api/policies/resources"; - static final String INVALID_URL_MSG = "NiFi Registry URL must be a valid URL of the form " + - "http(s)://(:)" + API_RESOURCES_PATH; + static final String INVALID_URL_MSG = "NiFi Registry URL must be a valid URL of the form http(s)://(:)" + API_RESOURCES_PATH; + private NiFiRegistryConnectionMgr() { + throw new UnsupportedOperationException("This is a utility class and cannot be instantiated"); + } - static public NiFiRegistryClient getNiFiRegistryClient(String serviceName, Map configs) throws Exception { + public static NiFiRegistryClient getNiFiRegistryClient(String serviceName, Map configs) throws Exception { final String url = configs.get(NiFiRegistryConfigs.NIFI_REG_URL); validateNotBlank(url, "NiFi Registry URL is required for " + serviceName); validateUrl(url); @@ -64,30 +65,27 @@ static public NiFiRegistryClient getNiFiRegistryClient(String serviceName, Map connectionTest(String serviceName, Map ret = new HashMap<>(); + final HashMap ret = new HashMap<>(); BaseClient.generateResponseDataMap(false, "Error creating NiFi Registry client", e.getMessage(), null, null, ret); return ret; } @@ -151,16 +148,11 @@ private static void validateUrl(String url) { } } - private static SSLContext createSslContext( - final String keystore, final char[] keystorePasswd, final String keystoreType, - final String truststore, final char[] truststorePasswd, final String truststoreType, - final String protocol) - throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException, - UnrecoverableKeyException, KeyManagementException { - + private static SSLContext createSslContext(final 
String keystore, final char[] keystorePasswd, final String keystoreType, final String truststore, final char[] truststorePasswd, final String truststoreType, final String protocol) + throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException, UnrecoverableKeyException, KeyManagementException { // prepare the keystore final KeyStore keyStore = KeyStore.getInstance(keystoreType); - try (final InputStream keyStoreStream = new FileInputStream(keystore)) { + try (InputStream keyStoreStream = new FileInputStream(keystore)) { keyStore.load(keyStoreStream, keystorePasswd); } final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); @@ -168,7 +160,7 @@ private static SSLContext createSslContext( // prepare the truststore final KeyStore trustStore = KeyStore.getInstance(truststoreType); - try (final InputStream trustStoreStream = new FileInputStream(truststore)) { + try (InputStream trustStoreStream = new FileInputStream(truststore)) { trustStore.load(trustStoreStream, truststorePasswd); } final TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); @@ -179,5 +171,4 @@ private static SSLContext createSslContext( sslContext.init(keyManagerFactory.getKeyManagers(), trustManagerFactory.getTrustManagers(), new SecureRandom()); return sslContext; } - } diff --git a/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryClient.java b/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryClient.java index 7db646f4dd..4775a33a58 100644 --- a/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryClient.java +++ b/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryClient.java @@ -28,6 +28,7 @@ import org.mockito.Mockito; import javax.ws.rs.core.Response; + 
import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.URL; @@ -39,12 +40,11 @@ import static org.mockito.Mockito.when; public class TestNiFiRegistryClient { - private NiFiRegistryClient registryClient; @Before public void setup() throws IOException { - final URL responseFile = TestNiFiRegistryClient.class.getResource("/resources-response.json"); + final URL responseFile = TestNiFiRegistryClient.class.getResource("/resources-response.json"); final String resourcesResponse = Resources.toString(responseFile, StandardCharsets.UTF_8); registryClient = new MockNiFiRegistryClient(resourcesResponse, 200); } @@ -135,18 +135,16 @@ public void testConnectionTestFailure() { Assert.assertEquals(NiFiRegistryClient.FAILURE_MSG, ret.get("message")); } - /** * Extend NiFiRegistryClient to return mock responses. */ private static final class MockNiFiRegistryClient extends NiFiRegistryClient { - - private int statusCode; + private int statusCode; private String responseEntity; private MockNiFiRegistryClient(String responseEntity, int statusCode) { super("http://localhost:18080/nifi-registry-api/policiesresources", null); - this.statusCode = statusCode; + this.statusCode = statusCode; this.responseEntity = responseEntity; } @@ -159,9 +157,7 @@ protected WebResource getWebResource() { protected ClientResponse getResponse(WebResource resource, String accept) { ClientResponse response = Mockito.mock(ClientResponse.class); when(response.getStatus()).thenReturn(statusCode); - when(response.getEntityInputStream()).thenReturn(new ByteArrayInputStream( - responseEntity.getBytes(StandardCharsets.UTF_8) - )); + when(response.getEntityInputStream()).thenReturn(new ByteArrayInputStream(responseEntity.getBytes(StandardCharsets.UTF_8))); return response; } } diff --git a/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryConnectionMgr.java 
b/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryConnectionMgr.java index 95a79995cd..9bcfa13dd9 100644 --- a/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryConnectionMgr.java +++ b/plugin-nifi-registry/src/test/java/org/apache/ranger/services/nifi/registry/client/TestNiFiRegistryConnectionMgr.java @@ -26,23 +26,22 @@ import java.util.Map; public class TestNiFiRegistryConnectionMgr { - - @Test (expected = IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testValidURLWithWrongEndPoint() throws Exception { final String nifiRegistryUrl = "http://localhost:18080/nifi-registry"; - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put(NiFiRegistryConfigs.NIFI_REG_URL, nifiRegistryUrl); configs.put(NiFiRegistryConfigs.NIFI_REG_AUTHENTICATION_TYPE, NiFiRegistryAuthType.NONE.name()); NiFiRegistryConnectionMgr.getNiFiRegistryClient("nifi-registry", configs); } - @Test (expected = IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testInvalidURL() throws Exception { final String nifiRegistryUrl = "not a url"; - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put(NiFiRegistryConfigs.NIFI_REG_URL, nifiRegistryUrl); configs.put(NiFiRegistryConfigs.NIFI_REG_AUTHENTICATION_TYPE, NiFiRegistryAuthType.NONE.name()); @@ -53,7 +52,7 @@ public void testInvalidURL() throws Exception { public void testAuthTypeNone() throws Exception { final String nifiRegistryUrl = "http://localhost:18080/nifi-registry-api/policies/resources"; - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put(NiFiRegistryConfigs.NIFI_REG_URL, nifiRegistryUrl); configs.put(NiFiRegistryConfigs.NIFI_REG_AUTHENTICATION_TYPE, NiFiRegistryAuthType.NONE.name()); @@ -65,7 +64,7 @@ public void testAuthTypeNone() throws Exception { @Test(expected = 
IllegalArgumentException.class) public void testAuthTypeNoneMissingURL() throws Exception { - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put(NiFiRegistryConfigs.NIFI_REG_URL, null); configs.put(NiFiRegistryConfigs.NIFI_REG_AUTHENTICATION_TYPE, NiFiRegistryAuthType.NONE.name()); @@ -76,7 +75,7 @@ public void testAuthTypeNoneMissingURL() throws Exception { public void testAuthTypeSSL() throws Exception { final String nifiRegistryUrl = "https://localhost:18080/nifi-registry-api/policies/resources"; - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put(NiFiRegistryConfigs.NIFI_REG_URL, nifiRegistryUrl); configs.put(NiFiRegistryConfigs.NIFI_REG_AUTHENTICATION_TYPE, NiFiRegistryAuthType.SSL.name()); @@ -95,7 +94,7 @@ public void testAuthTypeSSL() throws Exception { public void testAuthTypeSSLWithNonHttpsUrl() throws Exception { final String nifiRegistryUrl = "http://localhost:18080/nifi-registry-api/policies/resources"; - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put(NiFiRegistryConfigs.NIFI_REG_URL, nifiRegistryUrl); configs.put(NiFiRegistryConfigs.NIFI_REG_AUTHENTICATION_TYPE, NiFiRegistryAuthType.SSL.name()); @@ -114,11 +113,10 @@ public void testAuthTypeSSLWithNonHttpsUrl() throws Exception { public void testAuthTypeSSLMissingConfigs() throws Exception { final String nifiRegistryUrl = "http://localhost:18080/nifi-registry"; - Map configs = new HashMap<>(); + Map configs = new HashMap<>(); configs.put(NiFiRegistryConfigs.NIFI_REG_URL, nifiRegistryUrl); configs.put(NiFiRegistryConfigs.NIFI_REG_AUTHENTICATION_TYPE, NiFiRegistryAuthType.SSL.name()); NiFiRegistryConnectionMgr.getNiFiRegistryClient("nifi-registry", configs); } - } diff --git a/pom.xml b/pom.xml index 04f5ea76a8..6db6c04ad6 100644 --- a/pom.xml +++ b/pom.xml @@ -48,6 +48,7 @@ 2.2.0-b23 false 3.1.0 + true 8.29 4.2.1 1.6.4 @@ -204,6 +205,7 @@ reuseReports jacoco java + false 4.7.3.5 2.4.1 5.7.12 @@ -566,7 +568,7 @@ 
org.apache.rat apache-rat-plugin - 0.11 + 0.16.1 false @@ -577,6 +579,7 @@ **/src/main/webapp/fonts/** **/src/main/webapp/libs/** .git/** + .gitattributes/** .github/pull_request_template.md .pc/** debian/** @@ -660,14 +663,17 @@ com.github.spotbugs spotbugs-maven-plugin ${spotbugs.plugin.version} - - ./dev-support/spotbugsIncludeFile.xml - + spotbugs-check check + verify + + ${spotbugs.failOnViolation} + ./dev-support/spotbugsIncludeFile.xml + @@ -703,6 +709,7 @@ true ./dev-support/checkstyle.xml ./dev-support/checkstyle-suppressions.xml + ${checkstyle.skip} ${checkstyle.failOnViolation} @@ -778,25 +785,6 @@ https://issues.apache.org/jira/browse/ranger - - - true - - apache.snapshots.https - Apache Development Snapshot Repository - https://repository.apache.org/content/repositories/snapshots - - - - true - - - false - - apache.public.https - Apache Development Snapshot Repository - https://repository.apache.org/content/repositories/public - jetbrains-pty4j jetbrains-intellij-dependencies @@ -1165,6 +1153,7 @@ plugin-kms plugin-kudu plugin-kylin + plugin-nestedstructure plugin-nifi plugin-nifi-registry plugin-ozone diff --git a/ranger-authn/pom.xml b/ranger-authn/pom.xml index f30979fae1..7276004ef5 100644 --- a/ranger-authn/pom.xml +++ b/ranger-authn/pom.xml @@ -31,6 +31,8 @@ Ranger Authentication module + true + false UTF-8 diff --git a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuth.java b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuth.java index b0757ae7d1..a027966a7d 100644 --- a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuth.java +++ b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuth.java @@ -21,21 +21,11 @@ import org.apache.hadoop.security.authentication.server.AuthenticationToken; public class RangerAuth { - public static enum AUTH_TYPE { - JWT_JWKS("JWT-JWKS"); - - private final String authType; - - private AUTH_TYPE(String authType) { - this.authType = authType; - } 
- } - private String userName; - private AUTH_TYPE type; + private AuthType type; private boolean isAuthenticated; - public RangerAuth(final AuthenticationToken authenticationToken, AUTH_TYPE type) { + public RangerAuth(final AuthenticationToken authenticationToken, AuthType type) { this.userName = authenticationToken.getName(); this.isAuthenticated = true; this.type = type; @@ -49,11 +39,11 @@ public void setUserName(String userName) { this.userName = userName; } - public AUTH_TYPE getType() { + public AuthType getType() { return type; } - public void setType(AUTH_TYPE type) { + public void setType(AuthType type) { this.type = type; } @@ -64,4 +54,14 @@ public boolean isAuthenticated() { public void setAuthenticated(boolean isAuthenticated) { this.isAuthenticated = isAuthenticated; } + + public enum AuthType { + JWT_JWKS("JWT-JWKS"); + + private final String authType; + + AuthType(String authType) { + this.authType = authType; + } + } } diff --git a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuthHandler.java b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuthHandler.java index 4dcc37cc7d..8de25d3d19 100644 --- a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuthHandler.java +++ b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/RangerAuthHandler.java @@ -18,11 +18,12 @@ */ package org.apache.ranger.authz.handler; -import java.util.Properties; - import javax.servlet.http.HttpServletRequest; +import java.util.Properties; + public interface RangerAuthHandler { - void initialize(final Properties config) throws Exception; - RangerAuth authenticate(final HttpServletRequest request); + void initialize(Properties config) throws Exception; + + RangerAuth authenticate(HttpServletRequest request); } diff --git a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerDefaultJwtAuthHandler.java b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerDefaultJwtAuthHandler.java 
index 85339fb160..b501d133b6 100644 --- a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerDefaultJwtAuthHandler.java +++ b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerDefaultJwtAuthHandler.java @@ -18,30 +18,53 @@ */ package org.apache.ranger.authz.handler.jwt; -import javax.servlet.ServletRequest; -import javax.servlet.http.Cookie; -import javax.servlet.http.HttpServletRequest; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.security.authentication.server.AuthenticationToken; -import org.apache.ranger.authz.handler.RangerAuth; - import com.nimbusds.jose.proc.JWSKeySelector; import com.nimbusds.jose.proc.SecurityContext; import com.nimbusds.jwt.proc.ConfigurableJWTProcessor; import com.nimbusds.jwt.proc.DefaultJWTClaimsVerifier; import com.nimbusds.jwt.proc.DefaultJWTProcessor; import com.nimbusds.jwt.proc.JWTClaimsSetVerifier; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.security.authentication.server.AuthenticationToken; +import org.apache.ranger.authz.handler.RangerAuth; + +import javax.servlet.ServletRequest; +import javax.servlet.http.Cookie; +import javax.servlet.http.HttpServletRequest; /** * Default implementation of Ranger JWT authentication - * */ public class RangerDefaultJwtAuthHandler extends RangerJwtAuthHandler { - protected static final String AUTHORIZATION_HEADER = "Authorization"; protected static final String DO_AS_PARAMETER = "doAs"; + public static boolean canAuthenticateRequest(final ServletRequest request) { + HttpServletRequest httpServletRequest = (HttpServletRequest) request; + String jwtAuthHeaderStr = getJwtAuthHeader(httpServletRequest); + String jwtCookieStr = StringUtils.isBlank(jwtAuthHeaderStr) ? 
getJwtCookie(httpServletRequest) : null; + + return shouldProceedAuth(jwtAuthHeaderStr, jwtCookieStr); + } + + public static String getJwtAuthHeader(final HttpServletRequest httpServletRequest) { + return httpServletRequest.getHeader(AUTHORIZATION_HEADER); + } + + public static String getJwtCookie(final HttpServletRequest httpServletRequest) { + String jwtCookieStr = null; + Cookie[] cookies = httpServletRequest.getCookies(); + if (cookies != null) { + for (Cookie cookie : cookies) { + if (cookieName.equals(cookie.getName())) { + jwtCookieStr = cookie.getName() + "=" + cookie.getValue(); + break; + } + } + } + return jwtCookieStr; + } + @Override public ConfigurableJWTProcessor getJwtProcessor(JWSKeySelector keySelector) { ConfigurableJWTProcessor jwtProcessor = new DefaultJWTProcessor<>(); @@ -63,35 +86,9 @@ public RangerAuth authenticate(HttpServletRequest httpServletRequest) { AuthenticationToken authenticationToken = authenticate(jwtAuthHeaderStr, jwtCookieStr, doAsUser); if (authenticationToken != null) { - rangerAuth = new RangerAuth(authenticationToken, RangerAuth.AUTH_TYPE.JWT_JWKS); + rangerAuth = new RangerAuth(authenticationToken, RangerAuth.AuthType.JWT_JWKS); } return rangerAuth; } - - public static boolean canAuthenticateRequest(final ServletRequest request) { - HttpServletRequest httpServletRequest = (HttpServletRequest) request; - String jwtAuthHeaderStr = getJwtAuthHeader(httpServletRequest); - String jwtCookieStr = StringUtils.isBlank(jwtAuthHeaderStr) ? 
getJwtCookie(httpServletRequest) : null; - - return shouldProceedAuth(jwtAuthHeaderStr, jwtCookieStr); - } - - public static String getJwtAuthHeader(final HttpServletRequest httpServletRequest) { - return httpServletRequest.getHeader(AUTHORIZATION_HEADER); - } - - public static String getJwtCookie(final HttpServletRequest httpServletRequest) { - String jwtCookieStr = null; - Cookie[] cookies = httpServletRequest.getCookies(); - if (cookies != null) { - for (Cookie cookie : cookies) { - if (cookieName.equals(cookie.getName())) { - jwtCookieStr = cookie.getName() + "=" + cookie.getValue(); - break; - } - } - } - return jwtCookieStr; - } } diff --git a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerJwtAuthHandler.java b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerJwtAuthHandler.java index 17063cedfb..05cd5da6a5 100644 --- a/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerJwtAuthHandler.java +++ b/ranger-authn/src/main/java/org/apache/ranger/authz/handler/jwt/RangerJwtAuthHandler.java @@ -18,20 +18,6 @@ */ package org.apache.ranger.authz.handler.jwt; -import java.net.URL; -import java.text.ParseException; -import java.util.Arrays; -import java.util.Date; -import java.util.List; -import java.util.Properties; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.security.authentication.server.AuthenticationToken; -import org.apache.hadoop.security.authentication.util.CertificateUtil; -import org.apache.ranger.authz.handler.RangerAuthHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.nimbusds.jose.JOSEException; import com.nimbusds.jose.JWSObject; import com.nimbusds.jose.JWSVerifier; @@ -44,24 +30,41 @@ import com.nimbusds.jose.proc.SecurityContext; import com.nimbusds.jwt.SignedJWT; import com.nimbusds.jwt.proc.ConfigurableJWTProcessor; +import org.apache.commons.lang.StringUtils; +import 
org.apache.hadoop.security.authentication.server.AuthenticationToken; +import org.apache.hadoop.security.authentication.util.CertificateUtil; +import org.apache.ranger.authz.handler.RangerAuthHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.URL; +import java.text.ParseException; +import java.util.Arrays; +import java.util.Date; +import java.util.List; +import java.util.Properties; public abstract class RangerJwtAuthHandler implements RangerAuthHandler { private static final Logger LOG = LoggerFactory.getLogger(RangerJwtAuthHandler.class); - private JWSVerifier verifier = null; - private String jwksProviderUrl = null; - public static final String TYPE = "ranger-jwt"; // Constant that identifies the authentication mechanism. - public static final String KEY_PROVIDER_URL = "jwks.provider-url"; // JWKS provider URL - public static final String KEY_JWT_PUBLIC_KEY = "jwt.public-key"; // JWT token provider public key - public static final String KEY_JWT_COOKIE_NAME = "jwt.cookie-name"; // JWT cookie name - public static final String KEY_JWT_AUDIENCES = "jwt.audiences"; - public static final String JWT_AUTHZ_PREFIX = "Bearer "; - - protected List audiences = null; - protected JWKSource keySource = null; + public static final String TYPE = "ranger-jwt"; // Constant that identifies the authentication mechanism. 
+ public static final String KEY_PROVIDER_URL = "jwks.provider-url"; // JWKS provider URL + public static final String KEY_JWT_PUBLIC_KEY = "jwt.public-key"; // JWT token provider public key + public static final String KEY_JWT_COOKIE_NAME = "jwt.cookie-name"; // JWT cookie name + public static final String KEY_JWT_AUDIENCES = "jwt.audiences"; + public static final String JWT_AUTHZ_PREFIX = "Bearer "; protected static String cookieName = "hadoop-jwt"; + protected List audiences; + protected JWKSource keySource; + private JWSVerifier verifier; + private String jwksProviderUrl; + + public static boolean shouldProceedAuth(final String authHeader, final String jwtCookie) { + return (StringUtils.isNotBlank(authHeader) && authHeader.startsWith(JWT_AUTHZ_PREFIX)) || (StringUtils.isNotBlank(jwtCookie) && jwtCookie.startsWith(cookieName)); + } + @Override public void initialize(final Properties config) throws Exception { if (LOG.isDebugEnabled()) { @@ -71,7 +74,7 @@ public void initialize(final Properties config) throws Exception { // mandatory configurations jwksProviderUrl = config.getProperty(KEY_PROVIDER_URL); if (!StringUtils.isBlank(jwksProviderUrl)) { - keySource = new RemoteJWKSet<>(new URL(jwksProviderUrl)); + keySource = new RemoteJWKSet<>(new URL(jwksProviderUrl)); } // optional configurations @@ -81,8 +84,8 @@ public void initialize(final Properties config) throws Exception { if (StringUtils.isNotBlank(pemPublicKey)) { verifier = new RSASSAVerifier(CertificateUtil.parseRSAPublicKey(pemPublicKey)); } else if (StringUtils.isBlank(jwksProviderUrl)) { - throw new Exception("RangerJwtAuthHandler: Mandatory configs ('jwks.provider-url' & 'jwt.public-key') are missing, must provide atleast one."); - } + throw new Exception("RangerJwtAuthHandler: Mandatory configs ('jwks.provider-url' & 'jwt.public-key') are missing, must provide atleast one."); + } // setup custom cookie name if configured String customCookieName = config.getProperty(KEY_JWT_COOKIE_NAME); @@ -101,6 
+104,8 @@ public void initialize(final Properties config) throws Exception { } } + public abstract ConfigurableJWTProcessor getJwtProcessor(JWSKeySelector keySelector); + protected AuthenticationToken authenticate(final String jwtAuthHeader, final String jwtCookie, final String doAsUser) { if (LOG.isDebugEnabled()) { LOG.debug("===>>> RangerJwtAuthHandler.authenticate()"); @@ -250,8 +255,6 @@ protected boolean validateSignature(final SignedJWT jwtToken) { return valid; } - public abstract ConfigurableJWTProcessor getJwtProcessor(final JWSKeySelector keySelector); - /** * Validate whether any of the accepted audience claims is present in the issued * token claims list for audience. Override this method in subclasses in order @@ -316,8 +319,4 @@ protected boolean validateExpiration(final SignedJWT jwtToken) { return valid; } - - public static boolean shouldProceedAuth(final String authHeader, final String jwtCookie) { - return (StringUtils.isNotBlank(authHeader) && authHeader.startsWith(JWT_AUTHZ_PREFIX)) || (StringUtils.isNotBlank(jwtCookie) && jwtCookie.startsWith(cookieName)); - } } diff --git a/ranger-examples/dev-support/findbugsIncludeFile.xml b/ranger-examples/dev-support/findbugsIncludeFile.xml deleted file mode 100644 index 8623906bda..0000000000 --- a/ranger-examples/dev-support/findbugsIncludeFile.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - diff --git a/ranger-metrics/pom.xml b/ranger-metrics/pom.xml index 60869cff67..1f87d8e1cf 100644 --- a/ranger-metrics/pom.xml +++ b/ranger-metrics/pom.xml @@ -31,6 +31,8 @@ Ranger Metrics module + true + false UTF-8 diff --git a/ranger-metrics/src/main/java/org/apache/ranger/metrics/RangerMetricsSystemWrapper.java b/ranger-metrics/src/main/java/org/apache/ranger/metrics/RangerMetricsSystemWrapper.java index ba40b11618..66244bf5c6 100644 --- a/ranger-metrics/src/main/java/org/apache/ranger/metrics/RangerMetricsSystemWrapper.java +++ 
b/ranger-metrics/src/main/java/org/apache/ranger/metrics/RangerMetricsSystemWrapper.java @@ -19,15 +19,6 @@ package org.apache.ranger.metrics; -import java.io.IOException; -import java.io.StringWriter; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.ranger.metrics.sink.RangerMetricsJsonSink; @@ -39,22 +30,32 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + public class RangerMetricsSystemWrapper { private static final Logger LOG = LoggerFactory.getLogger(RangerMetricsSystemWrapper.class); private RangerMetricsPrometheusSink rangerMetricsPrometheusSink; - private RangerMetricsJsonSink rangerMetricsJsonSink; + private RangerMetricsJsonSink rangerMetricsJsonSink; /** * Initialized metric system. 
+ * * @param serviceName * @param sourceWrappers * @param sinkWrappers */ public void init(String serviceName, List sourceWrappers, List sinkWrappers) { // Initialize metrics system - MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serviceName); - Set sourceContexts = new HashSet(); + MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serviceName); + Set sourceContexts = new HashSet(); sourceContexts.add(serviceName); // Ranger Source @@ -64,7 +65,7 @@ public void init(String serviceName, List sourceWrap sourceWrappers.add(new RangerMetricsSourceWrapper("RangerJVM", "Ranger common metric source (RangerMetricsJvmSource)", serviceName, new RangerMetricsJvmSource(serviceName))); sourceWrappers.add(new RangerMetricsSourceWrapper("RangerContainer", "Ranger web container metric source (RangerMetricsContainerSource)", serviceName, new RangerMetricsContainerSource(serviceName))); - for (RangerMetricsSourceWrapper sourceWrapper: sourceWrappers) { + for (RangerMetricsSourceWrapper sourceWrapper : sourceWrappers) { metricsSystem.register(sourceWrapper.getName(), sourceWrapper.getDescription(), sourceWrapper.getSource()); sourceContexts.add(sourceWrapper.getContext()); } @@ -82,11 +83,11 @@ public void init(String serviceName, List sourceWrap rangerMetricsJsonSink = new RangerMetricsJsonSink(sourceContexts); sinkWrappers.add(new RangerMetricsSinkWrapper("Json", "Ranger common metric sink (RangerMetricsJsonSink)", rangerMetricsJsonSink)); - for (RangerMetricsSinkWrapper sinkWrapper: sinkWrappers) { + for (RangerMetricsSinkWrapper sinkWrapper : sinkWrappers) { metricsSystem.register(sinkWrapper.getName(), sinkWrapper.getDescription(), sinkWrapper.getSink()); } - LOG.info("===>> Ranger Metric system initialized successfully."); + LOG.info("Ranger Metric system initialized successfully!"); } public String getRangerMetricsInPrometheusFormat() throws IOException { diff --git 
a/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsJsonSink.java b/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsJsonSink.java index 4e23120ddf..b0b6761536 100644 --- a/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsJsonSink.java +++ b/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsJsonSink.java @@ -19,11 +19,6 @@ package org.apache.ranger.metrics.sink; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - import org.apache.commons.configuration2.SubsetConfiguration; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricType; @@ -32,10 +27,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + public class RangerMetricsJsonSink implements MetricsSink { private static final Logger LOG = LoggerFactory.getLogger(RangerMetricsJsonSink.class); - private final Set contexts; + private final Set contexts; private final Map> metricsJson = new HashMap<>(); public RangerMetricsJsonSink(Set contexts) { @@ -44,7 +44,7 @@ public RangerMetricsJsonSink(Set contexts) { @Override public void init(SubsetConfiguration conf) { - // Implementation not needed + // Implementation not needed } @Override @@ -53,11 +53,11 @@ public void putMetrics(MetricsRecord metricsRecord) { if (contexts.contains(metricsRecord.context())) { for (AbstractMetric metrics : metricsRecord.metrics()) { if (metrics.type() == MetricType.COUNTER || metrics.type() == MetricType.GAUGE) { - String recordName = metricsRecord.name(); - Map record = metricsJson.get(recordName); + String recordName = metricsRecord.name(); + Map record = metricsJson.get(recordName); if (Objects.isNull(record)) { - record = new HashMap<> (); + record = new HashMap<>(); } record.put(metrics.name(), metrics.value()); @@ -65,18 +65,16 @@ record = new 
HashMap<> (); } } } else { - if (LOG.isDebugEnabled()) { - LOG.debug("=== RangerMetricsJsonSink:putMetrics(): skipping... "+ metricsRecord.context()); - } + LOG.debug("<=== RangerMetricsJsonSink:putMetrics({}): skipping... ", metricsRecord.context()); } } catch (Exception e) { - LOG.error("Exception occured while converting metrics into json.", e); + LOG.error("Exception occured while converting metrics into json", e); } } @Override public void flush() { - // Implementation not needed + // Implementation not needed } public Map> getMetrics() { diff --git a/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsPrometheusSink.java b/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsPrometheusSink.java index 141fa372a1..e534a590fc 100644 --- a/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsPrometheusSink.java +++ b/ranger-metrics/src/main/java/org/apache/ranger/metrics/sink/RangerMetricsPrometheusSink.java @@ -19,6 +19,14 @@ package org.apache.ranger.metrics.sink; +import org.apache.commons.configuration2.SubsetConfiguration; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.metrics2.MetricType; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.MetricsSink; +import org.apache.hadoop.metrics2.MetricsTag; + import java.io.IOException; import java.io.Writer; import java.util.Collections; @@ -28,14 +36,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Pattern; -import org.apache.commons.configuration2.SubsetConfiguration; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricType; -import org.apache.hadoop.metrics2.MetricsRecord; -import org.apache.hadoop.metrics2.MetricsSink; -import org.apache.hadoop.metrics2.MetricsTag; - public class RangerMetricsPrometheusSink implements MetricsSink { 
private static final Pattern SPLIT_PATTERN = Pattern.compile("(? metricLines = new ConcurrentHashMap<>(); - private final Set contexts; + private final Set contexts; public RangerMetricsPrometheusSink(Set metricsContexts) { if (Objects.isNull(metricsContexts)) { @@ -64,7 +64,6 @@ public void putMetrics(MetricsRecord metricsRecord) { if (contexts.contains(metricsRecord.context())) { for (AbstractMetric metrics : metricsRecord.metrics()) { if (metrics.type() == MetricType.COUNTER || metrics.type() == MetricType.GAUGE) { - String key = prometheusName(metricsRecord.name(), metrics.name()); StringBuilder builder = new StringBuilder(); @@ -87,7 +86,7 @@ public void putMetrics(MetricsRecord metricsRecord) { metricLines.put(key, builder.toString()); } } - } + } } /** diff --git a/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsContainerSource.java b/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsContainerSource.java index d380d9e66e..2aa0a389d1 100644 --- a/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsContainerSource.java +++ b/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsContainerSource.java @@ -20,64 +20,48 @@ package org.apache.ranger.metrics.source; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.ranger.metrics.RangerMetricsInfo; import org.apache.ranger.server.tomcat.EmbeddedServer; import org.apache.ranger.server.tomcat.EmbeddedServerMetricsCollector; -import org.apache.hadoop.metrics2.MetricsCollector; import java.util.Objects; public class RangerMetricsContainerSource extends RangerMetricsSource { - + private final String context; private EmbeddedServerMetricsCollector embeddedServerMetricsCollector; - private long maxConnections; - private int acceptCount; - private long activeConnectionsCount; - private int maxContainersThreadCount; - private int minSpareThreadsCount; - 
private int activeContainerThreadsCount; - private int totalContainerThreadsCount; - private long connectionTimeout; - private long keepAliveTimeout; - private final String context; - public RangerMetricsContainerSource(String context) { - this.context = context; + this.context = context; this.embeddedServerMetricsCollector = EmbeddedServer.getServerMetricsCollector(); } @Override protected void refresh() { - - if(Objects.nonNull(this.embeddedServerMetricsCollector)) - { - this.maxConnections = embeddedServerMetricsCollector.getMaxAllowedConnection(); - this.acceptCount = embeddedServerMetricsCollector.getConnectionAcceptCount(); - this.activeConnectionsCount = embeddedServerMetricsCollector.getActiveConnectionCount(); - this.maxContainersThreadCount = embeddedServerMetricsCollector.getMaxContainerThreadsCount(); - this.minSpareThreadsCount = embeddedServerMetricsCollector.getMinSpareContainerThreadsCount(); + if (Objects.nonNull(this.embeddedServerMetricsCollector)) { + this.maxConnections = embeddedServerMetricsCollector.getMaxAllowedConnection(); + this.acceptCount = embeddedServerMetricsCollector.getConnectionAcceptCount(); + this.activeConnectionsCount = embeddedServerMetricsCollector.getActiveConnectionCount(); + this.maxContainersThreadCount = embeddedServerMetricsCollector.getMaxContainerThreadsCount(); + this.minSpareThreadsCount = embeddedServerMetricsCollector.getMinSpareContainerThreadsCount(); this.activeContainerThreadsCount = embeddedServerMetricsCollector.getActiveContainerThreadsCount(); - this.connectionTimeout = embeddedServerMetricsCollector.getConnectionTimeout(); - this.keepAliveTimeout = embeddedServerMetricsCollector.getKeepAliveTimeout(); - this.totalContainerThreadsCount = embeddedServerMetricsCollector.getTotalContainerThreadsCount(); + this.connectionTimeout = embeddedServerMetricsCollector.getConnectionTimeout(); + this.keepAliveTimeout = embeddedServerMetricsCollector.getKeepAliveTimeout(); + this.totalContainerThreadsCount = 
embeddedServerMetricsCollector.getTotalContainerThreadsCount(); } - - } @Override protected void update(MetricsCollector collector, boolean all) { - collector.addRecord("RangerWebContainer") .setContext(this.context) .addCounter(new RangerMetricsInfo("MaxConnectionsCount", "Ranger max configured container connections"), this.maxConnections) @@ -92,8 +76,7 @@ protected void update(MetricsCollector collector, boolean all) { } @VisibleForTesting - void setEmbeddedServerMetricsCollector( EmbeddedServerMetricsCollector embeddedServerMetricsCollector ){ + void setEmbeddedServerMetricsCollector(EmbeddedServerMetricsCollector embeddedServerMetricsCollector) { this.embeddedServerMetricsCollector = embeddedServerMetricsCollector; } - } diff --git a/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsJvmSource.java b/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsJvmSource.java index 2ea27c4113..cb9eca7c8f 100644 --- a/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsJvmSource.java +++ b/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsJvmSource.java @@ -19,31 +19,30 @@ package org.apache.ranger.metrics.source; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.ranger.metrics.RangerMetricsInfo; + import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import java.lang.management.OperatingSystemMXBean; -import java.lang.management.ThreadMXBean; import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; import java.util.Objects; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.ranger.metrics.RangerMetricsInfo; - public class RangerMetricsJvmSource extends RangerMetricsSource { - private long memoryCurrent; - private long memoryMaximum; - private long gcCountTotal; - private long gcTimeTotal; - private long gcTimeMax; - private 
int threadsBusy; - private int threadsBlocked; - private int threadsWaiting; - private int threadsRemaining; - private int processorsAvailable; - private float systemLoadAverage; - private final String context; + private long memoryCurrent; + private long memoryMaximum; + private long gcCountTotal; + private long gcTimeTotal; + private long gcTimeMax; + private int threadsBusy; + private int threadsBlocked; + private int threadsWaiting; + private int threadsRemaining; + private int processorsAvailable; + private float systemLoadAverage; public RangerMetricsJvmSource(String context) { this.context = context; @@ -56,44 +55,53 @@ protected void refresh() { memoryCurrent = memoryMXBean.getHeapMemoryUsage().getUsed(); memoryMaximum = memoryMXBean.getHeapMemoryUsage().getCommitted(); - // Threads - // reset - threadsBusy = threadsBlocked = threadsWaiting = threadsRemaining = 0; + // reset Threads + threadsBusy = 0; + threadsBlocked = 0; + threadsWaiting = 0; + threadsRemaining = 0; ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); for (Long threadID : threadMXBean.getAllThreadIds()) { ThreadInfo threadInfo = threadMXBean.getThreadInfo(threadID, 0); if (Objects.nonNull(threadInfo)) { - switch(threadInfo.getThreadState().toString()) { - case "RUNNABLE": threadsBusy++; break; - case "BLOCKED" : threadsBlocked++; break; - case "WAITING" : threadsWaiting++; break; - - default : threadsRemaining++; break; + switch (threadInfo.getThreadState().toString()) { + case "RUNNABLE": + threadsBusy++; + break; + case "BLOCKED": + threadsBlocked++; + break; + case "WAITING": + threadsWaiting++; + break; + + default: + threadsRemaining++; + break; } } } // Load OperatingSystemMXBean osMXBean = ManagementFactory.getOperatingSystemMXBean(); - systemLoadAverage = (float) osMXBean.getSystemLoadAverage(); + systemLoadAverage = (float) osMXBean.getSystemLoadAverage(); processorsAvailable = osMXBean.getAvailableProcessors(); // GC long totalGarbageCollections = 0; - long 
garbageCollectionTime = 0; - - for(GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) { + long garbageCollectionTime = 0; + for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) { long count = gc.getCollectionCount(); - if(count >= 0) { + if (count >= 0) { totalGarbageCollections += count; } long time = gc.getCollectionTime(); - if(time >= 0) { + if (time >= 0) { garbageCollectionTime += time; } @@ -103,23 +111,23 @@ protected void refresh() { } gcCountTotal = totalGarbageCollections; - gcTimeTotal = garbageCollectionTime; + gcTimeTotal = garbageCollectionTime; } @Override protected void update(MetricsCollector collector, boolean all) { collector.addRecord("RangerJvm") - .setContext(this.context) - .addGauge(new RangerMetricsInfo("MemoryCurrent", "Ranger current memory utilization"), memoryCurrent) - .addGauge(new RangerMetricsInfo("MemoryMax", "Ranger max memory utilization"), memoryMaximum) - .addGauge(new RangerMetricsInfo("GcCountTotal", "Ranger app total GCs"), gcCountTotal) - .addGauge(new RangerMetricsInfo("GcTimeTotal", "Ranger app total GC time"), gcTimeTotal) - .addGauge(new RangerMetricsInfo("GcTimeMax", "Ranger app MAX GC time"), gcTimeMax) - .addGauge(new RangerMetricsInfo("ThreadsBusy", "Ranger busy threads"), threadsBusy) - .addGauge(new RangerMetricsInfo("ThreadsBlocked", "Ranger blocked threads"), threadsBlocked) - .addGauge(new RangerMetricsInfo("ThreadsWaiting", "Ranger waiting threads"), threadsWaiting) - .addGauge(new RangerMetricsInfo("ThreadsRemaining", "Ranger remaining threads"), threadsRemaining) - .addGauge(new RangerMetricsInfo("ProcessorsAvailable", "Ranger Processors available"), processorsAvailable) - .addGauge(new RangerMetricsInfo("SystemLoadAvg", "Ranger System Load Average"), systemLoadAverage); + .setContext(this.context) + .addGauge(new RangerMetricsInfo("MemoryCurrent", "Ranger current memory utilization"), memoryCurrent) + .addGauge(new RangerMetricsInfo("MemoryMax", "Ranger 
max memory utilization"), memoryMaximum) + .addGauge(new RangerMetricsInfo("GcCountTotal", "Ranger app total GCs"), gcCountTotal) + .addGauge(new RangerMetricsInfo("GcTimeTotal", "Ranger app total GC time"), gcTimeTotal) + .addGauge(new RangerMetricsInfo("GcTimeMax", "Ranger app MAX GC time"), gcTimeMax) + .addGauge(new RangerMetricsInfo("ThreadsBusy", "Ranger busy threads"), threadsBusy) + .addGauge(new RangerMetricsInfo("ThreadsBlocked", "Ranger blocked threads"), threadsBlocked) + .addGauge(new RangerMetricsInfo("ThreadsWaiting", "Ranger waiting threads"), threadsWaiting) + .addGauge(new RangerMetricsInfo("ThreadsRemaining", "Ranger remaining threads"), threadsRemaining) + .addGauge(new RangerMetricsInfo("ProcessorsAvailable", "Ranger Processors available"), processorsAvailable) + .addGauge(new RangerMetricsInfo("SystemLoadAvg", "Ranger System Load Average"), systemLoadAverage); } } diff --git a/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsSource.java b/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsSource.java index 8bf051eba2..3e18c4870d 100644 --- a/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsSource.java +++ b/ranger-metrics/src/main/java/org/apache/ranger/metrics/source/RangerMetricsSource.java @@ -23,7 +23,6 @@ import org.apache.hadoop.metrics2.MetricsSource; public abstract class RangerMetricsSource implements MetricsSource { - @Override public void getMetrics(MetricsCollector collector, boolean all) { refresh(); @@ -33,12 +32,13 @@ public void getMetrics(MetricsCollector collector, boolean all) { /** * Responsibility of this method is to refresh metrics hold by this class. */ - abstract protected void refresh(); + protected abstract void refresh(); /** * Responsibility of this method is to update metrics system with latest values of ranger metrics. 
+ * * @param collector * @param all */ - abstract protected void update(MetricsCollector collector, boolean all); + protected abstract void update(MetricsCollector collector, boolean all); } diff --git a/ranger-metrics/src/main/java/org/apache/ranger/metrics/wrapper/RangerMetricsSinkWrapper.java b/ranger-metrics/src/main/java/org/apache/ranger/metrics/wrapper/RangerMetricsSinkWrapper.java index db1923d390..b84f1d13e4 100644 --- a/ranger-metrics/src/main/java/org/apache/ranger/metrics/wrapper/RangerMetricsSinkWrapper.java +++ b/ranger-metrics/src/main/java/org/apache/ranger/metrics/wrapper/RangerMetricsSinkWrapper.java @@ -22,14 +22,14 @@ import org.apache.hadoop.metrics2.MetricsSink; public class RangerMetricsSinkWrapper { - private final String name; - private final String description; + private final String name; + private final String description; private final MetricsSink sink; public RangerMetricsSinkWrapper(String name, String description, MetricsSink sink) { - this.name = name; + this.name = name; this.description = description; - this.sink=sink; + this.sink = sink; } public String getName() { diff --git a/ranger-metrics/src/test/java/org/apache/ranger/metrics/source/TestRangerMetricsContainerSource.java b/ranger-metrics/src/test/java/org/apache/ranger/metrics/source/TestRangerMetricsContainerSource.java index d802c5dba5..3ba03764a4 100644 --- a/ranger-metrics/src/test/java/org/apache/ranger/metrics/source/TestRangerMetricsContainerSource.java +++ b/ranger-metrics/src/test/java/org/apache/ranger/metrics/source/TestRangerMetricsContainerSource.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -22,7 +22,12 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.ranger.metrics.RangerMetricsSystemWrapper; import org.apache.ranger.server.tomcat.EmbeddedServerMetricsCollector; -import org.junit.*; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; import java.util.List; @@ -30,23 +35,19 @@ import static org.mockito.Mockito.when; public class TestRangerMetricsContainerSource { - - private static final String CONTAINER_METRIC_SOURCE_NAME = "RangerContainer"; - private static RangerMetricsSystemWrapper rangerMetricsSystemWrapper; - + private static final String CONTAINER_METRIC_SOURCE_NAME = "RangerContainer"; + private static RangerMetricsSystemWrapper rangerMetricsSystemWrapper; + private static MetricsSystem metricsSystem; private EmbeddedServerMetricsCollector embeddedServerMetricsCollector; - private static MetricsSystem metricsSystem; - - public TestRangerMetricsContainerSource(){ + public TestRangerMetricsContainerSource() { } @BeforeClass - public static void init(){ - - metricsSystem = DefaultMetricsSystem.instance(); + public static void init() { + metricsSystem = DefaultMetricsSystem.instance(); TestRangerMetricsContainerSource.rangerMetricsSystemWrapper = new RangerMetricsSystemWrapper(); - TestRangerMetricsContainerSource.rangerMetricsSystemWrapper.init("test", null, (List)null); + TestRangerMetricsContainerSource.rangerMetricsSystemWrapper.init("test", null, (List) null); } @AfterClass @@ -54,21 +55,20 @@ public static void tearDownAfterClass() { metricsSystem.shutdown(); } - // Without proper start of EmbeddedServer, embeddedServerMetricsCollector will be returned null. 
- // That's why, mocked instance should be injected here. + /* Without proper start of EmbeddedServer, embeddedServerMetricsCollector will return null. + That's why, mocked instance should be injected here. */ @Before - public void before(){ + public void before() { embeddedServerMetricsCollector = mock(EmbeddedServerMetricsCollector.class); - ((RangerMetricsContainerSource)DefaultMetricsSystem.instance().getSource(CONTAINER_METRIC_SOURCE_NAME)).setEmbeddedServerMetricsCollector(embeddedServerMetricsCollector); + ((RangerMetricsContainerSource) DefaultMetricsSystem.instance().getSource(CONTAINER_METRIC_SOURCE_NAME)).setEmbeddedServerMetricsCollector(embeddedServerMetricsCollector); } // Resetting it back to original state. @After - public void after(){ - ((RangerMetricsContainerSource)DefaultMetricsSystem.instance().getSource(CONTAINER_METRIC_SOURCE_NAME)).setEmbeddedServerMetricsCollector(null); + public void after() { + ((RangerMetricsContainerSource) DefaultMetricsSystem.instance().getSource(CONTAINER_METRIC_SOURCE_NAME)).setEmbeddedServerMetricsCollector(null); } - /* * Test Case: * This case verifies the tomcat metric collection when RangerMetricsContainerSource gets executed to collect the metrics. 
@@ -79,8 +79,7 @@ public void after(){ */ @Test - public void testContainerMetricsCollection(){ - + public void testContainerMetricsCollection() { when(embeddedServerMetricsCollector.getActiveConnectionCount()).thenReturn(1L); when(embeddedServerMetricsCollector.getMaxAllowedConnection()).thenReturn(8192L); when(embeddedServerMetricsCollector.getConnectionAcceptCount()).thenReturn(100); @@ -93,16 +92,14 @@ public void testContainerMetricsCollection(){ metricsSystem.publishMetricsNow(); - Assert.assertEquals(1L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ActiveConnectionsCount")); - Assert.assertEquals(60000L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ConnectionTimeout")); - Assert.assertEquals(200, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("MaxWorkerThreadsCount")); - Assert.assertEquals(15, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("TotalWorkerThreadsCount")); - Assert.assertEquals(60000L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("KeepAliveTimeout")); - Assert.assertEquals(2, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ActiveWorkerThreadsCount")); - Assert.assertEquals(100, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ConnectionAcceptCount")); - Assert.assertEquals(10, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("MinSpareWorkerThreadsCount")); - Assert.assertEquals(8192L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("MaxConnectionsCount")); - + Assert.assertEquals(1L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ActiveConnectionsCount")); + Assert.assertEquals(60000L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ConnectionTimeout")); + Assert.assertEquals(200, 
rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("MaxWorkerThreadsCount")); + Assert.assertEquals(15, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("TotalWorkerThreadsCount")); + Assert.assertEquals(60000L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("KeepAliveTimeout")); + Assert.assertEquals(2, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ActiveWorkerThreadsCount")); + Assert.assertEquals(100, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("ConnectionAcceptCount")); + Assert.assertEquals(10, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("MinSpareWorkerThreadsCount")); + Assert.assertEquals(8192L, rangerMetricsSystemWrapper.getRangerMetrics().get("RangerWebContainer").get("MaxConnectionsCount")); } - } diff --git a/security-admin/pom.xml b/security-admin/pom.xml index 42410fb783..7438863d89 100644 --- a/security-admin/pom.xml +++ b/security-admin/pom.xml @@ -1208,7 +1208,7 @@ Looks like there are multiple SQL files with same version number prefix. Update prefix and build again. - List sqlFilePaths = org.codehaus.plexus.util.FileUtils.getFileNames(new File("security-admin/db"), "**/*.sql", null, false); + List sqlFilePaths = org.codehaus.plexus.util.FileUtils.getFileNames(new File("${project.basedir}/db"), "**/*.sql", null, false); Map sqlFileMap = new HashMap(); Boolean noDupPrfx = true; @@ -1255,7 +1255,7 @@ Looks like there are multiple JAVA files with same version number suffix. Update suffix and build again. 
- List javaFilePaths = org.codehaus.plexus.util.FileUtils.getFileNames(new File("security-admin/src/main/java/org/apache/ranger/patch"), "**/*.java", null, false); + List javaFilePaths = org.codehaus.plexus.util.FileUtils.getFileNames(new File("${project.basedir}/src/main/java/org/apache/ranger/patch"), "**/*.java", null, false); Map javaFileMap = new HashMap(); Boolean noDupSuffix = true; diff --git a/security-admin/src/main/java/org/apache/ranger/biz/UserMgr.java b/security-admin/src/main/java/org/apache/ranger/biz/UserMgr.java index 07119dee39..f19c049948 100644 --- a/security-admin/src/main/java/org/apache/ranger/biz/UserMgr.java +++ b/security-admin/src/main/java/org/apache/ranger/biz/UserMgr.java @@ -1068,9 +1068,10 @@ public VXPortalUser createUser(VXPortalUser userProfile) { } public VXPortalUser createDefaultAccountUser(VXPortalUser userProfile) { - if (userProfile.getPassword() == null - || userProfile.getPassword().trim().isEmpty()) { - userProfile.setUserSource(RangerCommonEnums.USER_EXTERNAL); + if (userProfile.getUserSource() != RangerCommonEnums.USER_FEDERATED) { + if (StringUtils.isBlank(userProfile.getPassword())) { + userProfile.setUserSource(RangerCommonEnums.USER_EXTERNAL); + } } // access control checkAdminAccess(); diff --git a/security-admin/src/main/java/org/apache/ranger/biz/XUserMgr.java b/security-admin/src/main/java/org/apache/ranger/biz/XUserMgr.java index cec829361f..0350704749 100755 --- a/security-admin/src/main/java/org/apache/ranger/biz/XUserMgr.java +++ b/security-admin/src/main/java/org/apache/ranger/biz/XUserMgr.java @@ -50,6 +50,7 @@ import org.apache.ranger.plugin.model.UserInfo; import org.apache.ranger.plugin.store.EmbeddedServiceDefsUtil; import org.apache.ranger.plugin.util.RangerUserStore; +import org.apache.ranger.plugin.util.PasswordUtils.PasswordGenerator; import org.apache.ranger.service.*; import org.apache.ranger.ugsyncutil.model.GroupUserInfo; import org.apache.ranger.ugsyncutil.model.UsersGroupRoleAssignments; @@ 
-110,6 +111,7 @@ public class XUserMgr extends XUserMgrBase { private static final String USER = "User"; private static final String GROUP = "Group"; private static final int MAX_DB_TRANSACTION_RETRIES = 5; + private static final int PASSWORD_LENGTH = 16; @Autowired RangerBizUtil msBizUtil; @@ -187,6 +189,20 @@ public VXGroup getGroupByGroupName(String groupName) { public VXUser createXUser(VXUser vXUser) { checkAdminAccess(); xaBizUtil.blockAuditorRoleUser(); + + if (vXUser.getUserSource() == RangerCommonEnums.USER_FEDERATED) { + if (StringUtils.isEmpty(vXUser.getPassword())) { + PasswordGenerator passwordGenerator = new PasswordGenerator.PasswordGeneratorBuilder() + .useLower(true) + .useUpper(true) + .useDigits(true) + .useSymbols(true) + .build(); + String passWd = passwordGenerator.generate(PASSWORD_LENGTH); + vXUser.setPassword(passWd); + } + } + validatePassword(vXUser); String userName = vXUser.getName(); String firstName = vXUser.getFirstName(); diff --git a/security-admin/src/test/java/org/apache/ranger/biz/TestXUserMgr.java b/security-admin/src/test/java/org/apache/ranger/biz/TestXUserMgr.java index 647891ef36..2da5d3cd8d 100644 --- a/security-admin/src/test/java/org/apache/ranger/biz/TestXUserMgr.java +++ b/security-admin/src/test/java/org/apache/ranger/biz/TestXUserMgr.java @@ -278,6 +278,23 @@ private VXUser vxUser() { return vxUser; } + private VXUser vxUserFederated() { + Collection userRoleList = new ArrayList(); + userRoleList.add("ROLE_USER"); + Collection groupNameList = new ArrayList(); + groupNameList.add(groupName); + VXUser vxUser = new VXUser(); + vxUser.setId(userId); + vxUser.setDescription("group test working"); + vxUser.setName(userLoginID); + vxUser.setUserRoleList(userRoleList); + vxUser.setGroupNameList(groupNameList); + vxUser.setPassword(null); + vxUser.setEmailAddress("test@test.com"); + vxUser.setUserSource(RangerCommonEnums.USER_FEDERATED); + return vxUser; + } + private XXUser xxUser(VXUser vxUser) { XXUser xXUser = new 
XXUser(); xXUser.setId(userId); @@ -4655,4 +4672,67 @@ public void test132CreateExternalUser() { Assert.assertNotNull(createdXUser); Assert.assertEquals(createdXUser.getName(), vXUser.getName()); } + + @Test + public void test01CreateXUser_federated() { + destroySession(); + setup(); + VXUser vxUser = vxUserFederated(); + vxUser.setFirstName("user12"); + vxUser.setLastName("test12"); + Collection groupIdList = new ArrayList(); + groupIdList.add(userId); + vxUser.setGroupIdList(groupIdList); + VXGroup vxGroup = vxGroup(); + vxGroup.setName("user12Grp"); + VXGroupUser vXGroupUser = new VXGroupUser(); + vXGroupUser.setParentGroupId(userId); + vXGroupUser.setUserId(userId); + vXGroupUser.setName(vxGroup.getName()); + Mockito.when(xGroupService.readResource(userId)).thenReturn(vxGroup); + Mockito.when(xGroupUserService.createResource((VXGroupUser) Mockito.any())).thenReturn(vXGroupUser); + ArrayList userRoleListVXPortaUser = getRoleList(); + VXPortalUser vXPortalUser = new VXPortalUser(); + vXPortalUser.setUserRoleList(userRoleListVXPortaUser); + Mockito.when(xUserService.createResource(vxUser)).thenReturn(vxUser); + XXModuleDefDao value = Mockito.mock(XXModuleDefDao.class); + Mockito.when(daoManager.getXXModuleDef()).thenReturn(value); + Mockito.when(userMgr.createDefaultAccountUser((VXPortalUser) Mockito.any())).thenReturn(vXPortalUser); + Mockito.when(stringUtil.validateEmail("test@test.com")).thenReturn(true); + VXUser dbUser = xUserMgr.createXUser(vxUser); + Assert.assertNotNull(dbUser); + userId = dbUser.getId(); + Assert.assertEquals(userId, dbUser.getId()); + Assert.assertEquals(dbUser.getDescription(), vxUser.getDescription()); + Assert.assertEquals(dbUser.getName(), vxUser.getName()); + Assert.assertEquals(dbUser.getUserRoleList(), vxUser.getUserRoleList()); + Assert.assertEquals(dbUser.getGroupNameList(), + vxUser.getGroupNameList()); + Assert.assertNotNull(dbUser.getPassword()); + Assert.assertEquals(dbUser.getUserSource(), RangerCommonEnums.USER_FEDERATED); 
+ Mockito.verify(xUserService).createResource(vxUser); + Mockito.when(xUserService.readResourceWithOutLogin(userId)).thenReturn(vxUser); + + VXUser loggedInUser = vxUser(); + List loggedInUserRole = new ArrayList(); + loggedInUserRole.add(RangerConstants.ROLE_ADMIN); + loggedInUser.setId(8L); + loggedInUser.setName("testuser"); + loggedInUser.setUserRoleList(loggedInUserRole); + Mockito.when(xUserService.getXUserByUserName("admin")).thenReturn(loggedInUser); + Mockito.when(restErrorUtil.createRESTException(HttpServletResponse.SC_FORBIDDEN, "Logged-In user is not allowed to access requested user data", true)).thenThrow(new WebApplicationException()); + thrown.expect(WebApplicationException.class); + VXUser dbvxUser = xUserMgr.getXUser(userId); + Mockito.verify(userMgr).createDefaultAccountUser((VXPortalUser) Mockito.any()); + Assert.assertNotNull(dbvxUser); + Assert.assertEquals(userId, dbvxUser.getId()); + Assert.assertEquals(dbvxUser.getDescription(), vxUser.getDescription()); + Assert.assertEquals(dbvxUser.getName(), vxUser.getName()); + Assert.assertEquals(dbvxUser.getUserRoleList(),vxUser.getUserRoleList()); + Assert.assertEquals(dbvxUser.getGroupIdList(),vxUser.getGroupIdList()); + Assert.assertEquals(dbvxUser.getGroupNameList(),vxUser.getGroupNameList()); + Assert.assertNotNull(dbvxUser.getPassword()); + Assert.assertEquals(dbvxUser.getUserSource(), RangerCommonEnums.USER_FEDERATED); + Mockito.verify(xUserService).readResourceWithOutLogin(userId); + } }