Skip to content

Commit

Permalink
Merge pull request #3961 from atlanhq/PLT-3019-master
Browse files Browse the repository at this point in the history
PLT-3019 Change log level from info to debug on startup and init methods #3940
  • Loading branch information
sriram-atlan authored Jan 10, 2025
2 parents ea81bca + 5202218 commit 338c47e
Show file tree
Hide file tree
Showing 10 changed files with 33 additions and 33 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ public class RedisServiceImpl extends AbstractRedisService{
public void init() throws AtlasException {
    // Create two separate Redisson clients: one against the primary (sentinel)
    // configuration and one backing the cache implementation.
    redisClient = Redisson.create(getProdConfig());
    redisCacheClient = Redisson.create(getCacheImplConfig());
    // Logged at debug (not info) to keep startup logs quiet (PLT-3019).
    LOG.debug("Sentinel redis client created successfully.");
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ public static void configureTxLogBasedIndexRecovery() {

updateGlobalConfiguration(properties);

LOG.info("Tx Log-based Index Recovery: {}!", recoveryEnabled ? "Enabled" : "Disabled");
LOG.debug("Tx Log-based Index Recovery: {}!", recoveryEnabled ? "Enabled" : "Disabled");
} catch (Exception e) {
LOG.error("Error: Failed!", e);
}
Expand All @@ -243,7 +243,7 @@ private static void updateGlobalConfiguration(Map<String, Object> map) {
managementSystem.set(entry.getKey(), entry.getValue());
}

LOG.info("Global properties updated!: {}", map);
LOG.debug("Global properties updated!: {}", map);
} catch (Exception ex) {
LOG.error("Error updating global configuration: {}", map, ex);
} finally {
Expand Down Expand Up @@ -358,7 +358,7 @@ public AtlasGraph<AtlasJanusVertex, AtlasJanusEdge> getGraphBulkLoading() {
}

private static void startEmbeddedSolr() throws AtlasException {
LOG.info("==> startEmbeddedSolr()");
LOG.debug("==> startEmbeddedSolr()");

try {
Class<?> localSolrRunnerClz = Class.forName("org.apache.atlas.runner.LocalSolrRunner");
Expand All @@ -371,11 +371,11 @@ private static void startEmbeddedSolr() throws AtlasException {
throw new AtlasException("startEmbeddedSolr(): failed", excp);
}

LOG.info("<== startEmbeddedSolr()");
LOG.debug("<== startEmbeddedSolr()");
}

private static void stopEmbeddedSolr() throws AtlasException {
LOG.info("==> stopEmbeddedSolr()");
LOG.debug("==> stopEmbeddedSolr()");

try {
Class<?> localSolrRunnerClz = Class.forName("org.apache.atlas.runner.LocalSolrRunner");
Expand All @@ -388,7 +388,7 @@ private static void stopEmbeddedSolr() throws AtlasException {
throw new AtlasException("stopEmbeddedSolr(): failed", excp);
}

LOG.info("<== stopEmbeddedSolr()");
LOG.debug("<== stopEmbeddedSolr()");
}

public static boolean isEmbeddedSolr() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ public class KafkaNotification extends AbstractNotification implements Service {
public KafkaNotification(Configuration applicationProperties) throws AtlasException {
super(applicationProperties);

LOG.info("==> KafkaNotification()");
LOG.debug("==> KafkaNotification()");

Configuration kafkaConf = ApplicationProperties.getSubsetConfiguration(applicationProperties, PROPERTY_PREFIX);

Expand Down Expand Up @@ -138,7 +138,7 @@ public KafkaNotification(Configuration applicationProperties) throws AtlasExcept

KafkaUtils.setKafkaJAASProperties(applicationProperties, properties);

LOG.info("<== KafkaNotification()");
LOG.debug("<== KafkaNotification()");
}

@VisibleForTesting
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -298,24 +298,24 @@ private void initialize(AtlasGraph graph) throws RepositoryException, IndexExcep
AtlasGraphManagement management = graph.getManagementSystem();

try {
LOG.info("Creating indexes for graph.");
LOG.debug("Creating indexes for graph.");

if (management.getGraphIndex(VERTEX_INDEX) == null) {
management.createVertexMixedIndex(VERTEX_INDEX, BACKING_INDEX, Collections.emptyList());

LOG.info("Created index : {}", VERTEX_INDEX);
LOG.debug("Created index : {}", VERTEX_INDEX);
}

if (management.getGraphIndex(EDGE_INDEX) == null) {
management.createEdgeMixedIndex(EDGE_INDEX, BACKING_INDEX, Collections.emptyList());

LOG.info("Created index : {}", EDGE_INDEX);
LOG.debug("Created index : {}", EDGE_INDEX);
}

if (management.getGraphIndex(FULLTEXT_INDEX) == null) {
management.createFullTextMixedIndex(FULLTEXT_INDEX, BACKING_INDEX, Collections.emptyList());

LOG.info("Created index : {}", FULLTEXT_INDEX);
LOG.debug("Created index : {}", FULLTEXT_INDEX);
}

HashMap<String, Object> ES_DATE_FIELD = new HashMap<>();
Expand Down Expand Up @@ -443,7 +443,7 @@ private void initialize(AtlasGraph graph) throws RepositoryException, IndexExcep

commit(management);

LOG.info("Index creation for global keys complete.");
LOG.debug("Index creation for global keys complete.");
} catch (Throwable t) {
LOG.error("GraphBackedSearchIndexer.initialize() failed", t);

Expand Down Expand Up @@ -850,7 +850,7 @@ public String createVertexIndex(AtlasGraphManagement management, String property
}

indexFieldName = management.addMixedIndex(VERTEX_INDEX, propertyKey, isStringField, indexTypeESConfig, indexTypeESFields);
LOG.info("Created backing index for vertex property {} of type {} ", propertyName, propertyClass.getName());
LOG.debug("Created backing index for vertex property {} of type {} ", propertyName, propertyClass.getName());
}

if(indexFieldName == null && isIndexApplicable(propertyClass, cardinality)) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ private DeleteHandlerV1 getDefaultConfiguredHandler(AtlasTypeRegistry typeRegist
try {
Class handlerFromProperties = AtlasRepositoryConfiguration.getDeleteHandlerV1Impl();

LOG.info("Default delete handler set to: {}", handlerFromProperties.getName());
LOG.debug("Default delete handler set to: {}", handlerFromProperties.getName());

ret = (DeleteHandlerV1) handlerFromProperties.getConstructor(AtlasGraph.class, AtlasTypeRegistry.class, TaskManagement.class)
.newInstance(this.graph, typeRegistry, taskManagement);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ public class AtlasGraphUtilsV2 {
} catch (Exception excp) {
LOG.error("Error reading configuration", excp);
} finally {
LOG.info("atlas.use.index.query.to.find.entity.by.unique.attributes=" + USE_INDEX_QUERY_TO_FIND_ENTITY_BY_UNIQUE_ATTRIBUTES);
LOG.debug("atlas.use.index.query.to.find.entity.by.unique.attributes=" + USE_INDEX_QUERY_TO_FIND_ENTITY_BY_UNIQUE_ATTRIBUTES);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,11 +45,11 @@ public EntityNotificationSender(NotificationInterface notificationInterface, Con

public EntityNotificationSender(NotificationInterface notificationInterface, boolean sendPostCommit) {
    // Choose the delivery strategy once at construction time:
    //  - sendPostCommit == true  -> buffer notifications and send after the
    //    transaction commits (avoids notifying about rolled-back changes)
    //  - sendPostCommit == false -> send inline, without waiting for commit
    // Logged at debug (not info) to keep startup logs quiet (PLT-3019).
    if (sendPostCommit) {
        LOG.debug("EntityNotificationSender: notifications will be sent after transaction commit");

        this.notificationSender = new PostCommitNotificationSender(notificationInterface);
    } else {
        LOG.debug("EntityNotificationSender: notifications will be sent inline (i.e. not waiting for transaction to commit)");

        this.notificationSender = new InlineNotificationSender(notificationInterface);
    }
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ public class AccessAuditLogsIndexCreator extends Thread {
private boolean is_completed = false;

public AccessAuditLogsIndexCreator(Configuration configuration) throws IOException {
LOG.info("Starting Ranger audit schema setup in ElasticSearch.");
LOG.debug("Starting Ranger audit schema setup in ElasticSearch.");
time_interval = configuration.getLong(ES_TIME_INTERVAL, DEFAULT_ES_TIME_INTERVAL_MS);
user = configuration.getString(ES_CONFIG_USERNAME);

Expand Down Expand Up @@ -124,14 +124,14 @@ private String connectionString() {

@Override
public void run() {
LOG.info("Started run method");
LOG.debug("Started run method");
if (CollectionUtils.isNotEmpty(hosts)) {
LOG.info("Elastic search hosts=" + hosts + ", index=" + index);
LOG.debug("Elastic search hosts=" + hosts + ", index=" + index);
while (!is_completed && (max_retry == TRY_UNTIL_SUCCESS || retry_counter < max_retry)) {
try {
LOG.info("Trying to acquire elastic search connection");
LOG.debug("Trying to acquire elastic search connection");
if (connect()) {
LOG.info("Connection to elastic search established successfully");
LOG.debug("Connection to elastic search established successfully");
if (createIndex()) {
is_completed = true;
break;
Expand Down Expand Up @@ -232,18 +232,18 @@ private boolean createIndex() {

int statusCode = response.getStatusLine().getStatusCode();
if (statusCode == 200) {
LOG.info("Entity audits index exists!");
LOG.debug("Entity audits index exists!");
exists = true;
} else {
LOG.info("Entity audits index does not exist!");
LOG.debug("Entity audits index does not exist!");
exists = false;
}

} catch (Exception e) {
LOG.info("Index " + this.index + " not available.");
LOG.warn("Index " + this.index + " not available.");
}
if (!exists) {
LOG.info("Index does not exist. Attempting to create index:" + this.index);
LOG.debug("Index does not exist. Attempting to create index:" + this.index);
try {
HttpEntity entity = new NStringEntity(es_ranger_audit_schema_json, ContentType.APPLICATION_JSON);
Request request = new Request("PUT", index);
Expand All @@ -257,15 +257,15 @@ private boolean createIndex() {
Response response = client.performRequest(request);

if (response != null && response.getStatusLine().getStatusCode() == 200) {
LOG.info("Index " + this.index + " created successfully.");
LOG.debug("Index " + this.index + " created successfully.");
exists = true;
}
} catch (Exception e) {
LOG.error("Unable to create Index. Reason:" + e.toString());
e.printStackTrace();
}
} else {
LOG.info("Index " + this.index + " is already created.");
LOG.debug("Index " + this.index + " is already created.");
}
}
return exists;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ protected void doServiceLogin(Configuration hadoopConfig,
getServerPrincipal(configuration.getString(AUTHENTICATION_PRINCIPAL), bindAddress),
configuration.getString(AUTHENTICATION_KEYTAB));
}
LOG.info("Logged in user {}", UserGroupInformation.getLoginUser());
LOG.debug("Logged in user {}", UserGroupInformation.getLoginUser());
} catch (IOException e) {
throw new IllegalStateException(String.format("Unable to perform %s login.", authenticationMethod), e);
}
Expand All @@ -99,7 +99,7 @@ protected void setupHadoopConfiguration(Configuration hadoopConfig, org.apache.c
String kerberosAuthNEnabled = configuration != null ? configuration.getString(AUTHENTICATION_KERBEROS_METHOD) : null;
// getString may return null, and would like to log the nature of the default setting
if (kerberosAuthNEnabled == null || kerberosAuthNEnabled.equalsIgnoreCase("false")) {
LOG.info("No authentication method configured. Defaulting to simple authentication");
LOG.debug("No authentication method configured. Defaulting to simple authentication");
authMethod = "simple";
} else if (kerberosAuthNEnabled.equalsIgnoreCase("true")) {
authMethod = "kerberos";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata)
LOG.warn("Running setup per configuration {}.", ATLAS_SERVER_RUN_SETUP_KEY);
return true;
} else {
LOG.info("Not running setup per configuration {}.", ATLAS_SERVER_RUN_SETUP_KEY);
LOG.debug("Not running setup per configuration {}.", ATLAS_SERVER_RUN_SETUP_KEY);
}
} catch (AtlasException e) {
LOG.error("Unable to read config to determine if setup is needed. Not running setup.");
Expand Down

0 comments on commit 338c47e

Please sign in to comment.