diff --git a/Dockerfile b/Dockerfile index a754615..26dd1ea 100644 --- a/Dockerfile +++ b/Dockerfile @@ -178,10 +178,7 @@ COPY --from=jre-build /javaruntime $JAVA_HOME COPY --from=builder-staging /staging/ / RUN true && \ - mv /etc/iofog-agent/config_new.xml /etc/iofog-agent/config.xml && \ - mv /etc/iofog-agent/config-development_new.xml /etc/iofog-agent/config-development.xml && \ - mv /etc/iofog-agent/config-production_new.xml /etc/iofog-agent/config-production.xml && \ - mv /etc/iofog-agent/config-switcher_new.xml /etc/iofog-agent/config-switcher.xml && \ + mv /etc/iofog-agent/config_new.yaml /etc/iofog-agent/config.yaml && \ mv /etc/iofog-agent/cert_new.crt /etc/iofog-agent/cert.crt && \ # /etc/iofog-agent/local-api && \ mkdir -p /var/backups/iofog-agent && \ diff --git a/build.gradle b/build.gradle index c186746..e6a175b 100644 --- a/build.gradle +++ b/build.gradle @@ -5,7 +5,7 @@ plugins { allprojects { group = 'org.eclipse' - version = '3.5.6' + version = '3.6.0' } subprojects { diff --git a/iofog-agent-daemon/build.gradle b/iofog-agent-daemon/build.gradle index 34f7df2..027af31 100644 --- a/iofog-agent-daemon/build.gradle +++ b/iofog-agent-daemon/build.gradle @@ -46,6 +46,7 @@ dependencies { implementation 'com.google.crypto.tink:tink:1.9.0' implementation 'org.bouncycastle:bcprov-jdk18on:1.80' implementation 'org.msgpack:msgpack-core:0.9.8' + implementation 'org.yaml:snakeyaml:2.2' testImplementation 'org.mockito:mockito-core:5.4.0' testImplementation 'org.mockito:mockito-junit-jupiter:5.4.0' testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.10.0' diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/Daemon.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/Daemon.java index 1019370..0515d40 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/Daemon.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/Daemon.java @@ -172,6 +172,8 @@ public void write(int b) { } public static void main(String[] args) throws 
ParseException { + // Set LogManager system property FIRST, before any other code + System.setProperty("java.util.logging.manager", "org.jboss.logmanager.LogManager"); try { Configuration.load(); diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineAction.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineAction.java index 7506332..7e082b2 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineAction.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineAction.java @@ -255,8 +255,7 @@ public String perform(String[] args) { try { - HashMap oldValuesMap = getOldNodeValuesForParameters(config.keySet(), - Configuration.getCurrentConfig()); + HashMap oldValuesMap = Configuration.getOldNodeValuesForParameters(config.keySet()); HashMap errorMap = setConfig(config, false); for (Map.Entry e : errorMap.entrySet()) diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineConfigParam.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineConfigParam.java index 61c185f..5bf1be9 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineConfigParam.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/command_line/CommandLineConfigParam.java @@ -55,7 +55,7 @@ public enum CommandLineConfigParam { GPS_SCAN_FREQUENCY("60", "gpsf", "gps_scan_freq", "gpsScanFrequency"), GPS_COORDINATES ("", "", "gps_coordinates", "gpscoordinates"), POST_DIAGNOSTICS_FREQ ("10", "df", "post_diagnostics_freq", "postdiagnosticsfreq"), - FOG_TYPE ("auto", "ft", "fog_type", ""), + ARCH ("auto", "ft", "arch", ""), SECURE_MODE ("off", "sec", "secure_mode", ""), ROUTER_HOST ("", "", "router_host", "routerHost"), ROUTER_PORT ("0", "", "router_port", "routerPort"), @@ -64,6 +64,7 @@ public enum CommandLineConfigParam { READY_TO_UPGRADE_SCAN_FREQUENCY ("24", "uf", "upgrade_scan_frequency", 
"readyToUpgradeScanFrequency"), DEV_MODE ("off", "dev", "dev_mode", ""), TIME_ZONE("", "tz", "time_zone", "timeZone"), + NAMESPACE("default", "", "namespace", "namespace"), CA_CERT("", "", "ca_cert", "caCert"), TLS_CERT("", "", "tls_cert", "tlsCert"), TLS_KEY("", "", "tls_key", "tlsKey"), diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/FieldAgent.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/FieldAgent.java index 65358d2..f3be1c6 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/FieldAgent.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/FieldAgent.java @@ -100,6 +100,7 @@ public class FieldAgent implements IOFogModule { private ScheduledFuture futureTask; private EdgeResourceManager edgeResourceManager; private VolumeMountManager volumeMountManager; + private LogSessionManager logSessionManager; private final Map activeExecSessions = new ConcurrentHashMap<>(); private final Map execCallbacks = new ConcurrentHashMap<>(); @@ -499,6 +500,16 @@ private final Future processChanges(JsonObject changes) { resetChanges = false; } } + if (changes.getBoolean("microserviceLogs", false) || changes.getBoolean("fogLogs", false)) { + logDebug("Processing log sessions changes - microserviceLogs: " + changes.getBoolean("microserviceLogs", false) + + ", fogLogs: " + changes.getBoolean("fogLogs", false)); + try { + handleLogSessions(); + } catch (Exception e) { + logError("Unable to handle log sessions", e); + resetChanges = false; + } + } } logDebug("Finished processing changes with resetChanges: " + resetChanges); return resetChanges; @@ -1066,7 +1077,15 @@ private Function containerJsonObjectToMicroserviceFunc .boxed() .map(volumeMappingObj::getJsonObject) .map(volumeMapping -> { - VolumeMappingType volumeMappingType = volumeMapping.getString("type", "bind").equals("volume") ? 
VolumeMappingType.VOLUME : VolumeMappingType.BIND; + VolumeMappingType volumeMappingType; + String typeStr = volumeMapping.getString("type", "bind"); + if ("volumeMount".equals(typeStr)) { + volumeMappingType = VolumeMappingType.VOLUME_MOUNT; + } else if ("volume".equals(typeStr)) { + volumeMappingType = VolumeMappingType.VOLUME; + } else { + volumeMappingType = VolumeMappingType.BIND; + } return new VolumeMapping(volumeMapping.getString("hostDestination"), volumeMapping.getString("containerDestination"), volumeMapping.getString("accessMode"), @@ -1145,6 +1164,48 @@ private Function containerJsonObjectToMicroserviceFunc microservice.setMemoryLimit(jsonObj.getJsonNumber("memoryLimit").longValue()); } + JsonValue serviceAccountValue = jsonObj.get("serviceAccount"); + if (serviceAccountValue != null && !serviceAccountValue.getValueType().equals(JsonValue.ValueType.NULL)) { + JsonObject serviceAccountObj = (JsonObject) serviceAccountValue; + String serviceAccountName = serviceAccountObj.containsKey("name") && !serviceAccountObj.isNull("name") + ? serviceAccountObj.getString("name") : null; + + RoleRef roleRef = null; + JsonValue roleRefValue = serviceAccountObj.get("roleRef"); + if (roleRefValue != null && !roleRefValue.getValueType().equals(JsonValue.ValueType.NULL)) { + JsonObject roleRefObj = (JsonObject) roleRefValue; + String kind = roleRefObj.containsKey("kind") && !roleRefObj.isNull("kind") + ? roleRefObj.getString("kind") : null; + String name = roleRefObj.containsKey("name") && !roleRefObj.isNull("name") + ? 
roleRefObj.getString("name") : null; + if (kind != null && name != null) { + roleRef = new RoleRef(kind, name); + } + } + + List rules = null; + JsonValue rulesValue = serviceAccountObj.get("rules"); + if (rulesValue != null && !rulesValue.getValueType().equals(JsonValue.ValueType.NULL)) { + JsonArray rulesArray = (JsonArray) rulesValue; + if (rulesArray.size() > 0) { + rules = IntStream.range(0, rulesArray.size()) + .boxed() + .map(rulesArray::getJsonObject) + .map(ruleObj -> { + List apiGroups = getStringList(ruleObj.get("apiGroups")); + List resources = getStringList(ruleObj.get("resources")); + List verbs = getStringList(ruleObj.get("verbs")); + return new Rule(apiGroups, resources, verbs); + }) + .collect(toList()); + } + } + + if (serviceAccountName != null || roleRef != null || rules != null) { + microservice.setServiceAccount(new ServiceAccount(serviceAccountName, roleRef, rules)); + } + } + try { LoggingService.setupMicroserviceLogger(microservice.getMicroserviceUuid(), microservice.getLogSize()); } catch (IOException e) { @@ -1649,6 +1710,7 @@ public JsonObject provision(String key) { // Set initial configuration Configuration.setIofogUuid(provisioningResult.getString("uuid")); Configuration.setPrivateKey(provisioningResult.getString("privateKey")); + Configuration.setNamespace(provisioningResult.getString("namespace")); Configuration.saveConfigUpdates(); Configuration.updateConfigBackUpFile(); @@ -1748,6 +1810,7 @@ public String deProvision(boolean isTokenExpired) { // Store configuration values before clearing them String iofogUuid = Configuration.getIofogUuid(); String privateKey = Configuration.getPrivateKey(); + String namespace = Configuration.getNamespace(); // Attempt deprovision request if not token expired boolean deprovisionRequestSuccessful = false; @@ -1780,6 +1843,7 @@ public String deProvision(boolean isTokenExpired) { // Configuration.setAccessToken(""); Configuration.setPrivateKey(""); Configuration.saveConfigUpdates(); + 
Configuration.setNamespace("default"); logDebug("Configuration cleared successfully"); // Reset JWT Manager to clear static state and allow re-initialization with new credentials @@ -1912,6 +1976,7 @@ public void start() { sshProxyManager = new SshProxyManager(new SshConnection()); edgeResourceManager = EdgeResourceManager.getInstance(); volumeMountManager = VolumeMountManager.getInstance(); + logSessionManager = new LogSessionManager(); boolean isConnected = ping(); getFogConfig(); if (!notProvisioned()) { @@ -2462,4 +2527,134 @@ public void handleExecSessionClose(String microserviceUuid, String execId) { LoggingService.logError(MODULE_NAME, "Error handling exec session close", e); } } + + /** + * Fetches log sessions from controller + * @return List of LogSession objects + * @throws Exception if fetch fails + */ + private List fetchLogSessions() throws Exception { + logDebug("Start fetching log sessions from controller"); + List sessions = new ArrayList<>(); + + if (notProvisioned() || !isControllerConnected(false)) { + logDebug("Not provisioned or not connected, returning empty list"); + return sessions; + } + + // Check thread interruption before making request + if (Thread.currentThread().isInterrupted()) { + logWarning("Thread interrupted before making log sessions request"); + throw new InterruptedException("Thread interrupted before request"); + } + + try { + logDebug("Making request to controller for log sessions"); + JsonObject response = orchestrator.request("logs/sessions", RequestType.GET, null, null); + logDebug("Received response from controller, parsing log sessions"); + if (response != null && response.containsKey("logSessions")) { + JsonArray logSessionsArray = response.getJsonArray("logSessions"); + if (logSessionsArray != null) { + for (int i = 0; i < logSessionsArray.size(); i++) { + JsonObject sessionJson = logSessionsArray.getJsonObject(i); + LogSession session = parseLogSession(sessionJson); + if (session != null) { + sessions.add(session); 
+ } + } + } + } + logDebug("Fetched " + sessions.size() + " log sessions from controller"); + } catch (CertificateException | SSLHandshakeException e) { + verificationFailed(e); + logError("Unable to get log sessions due to broken certificate", + new AgentSystemException(e.getMessage(), e)); + throw e; + } catch (Exception e) { + logError("Unable to get log sessions", new AgentSystemException(e.getMessage(), e)); + throw e; + } + + logDebug("Finished fetching log sessions"); + return sessions; + } + + /** + * Parses a JSON object into a LogSession + */ + private LogSession parseLogSession(JsonObject jsonObj) { + try { + String sessionId = jsonObj.getString("sessionId"); + String microserviceUuid = jsonObj.containsKey("microserviceUuid") && !jsonObj.isNull("microserviceUuid") + ? jsonObj.getString("microserviceUuid") : null; + String iofogUuid = jsonObj.containsKey("iofogUuid") && !jsonObj.isNull("iofogUuid") + ? jsonObj.getString("iofogUuid") : null; + String status = jsonObj.containsKey("status") ? jsonObj.getString("status") : "PENDING"; + boolean agentConnected = jsonObj.containsKey("agentConnected") ? 
jsonObj.getBoolean("agentConnected") : false; + + // Parse tailConfig + Map tailConfig = new HashMap<>(); + if (jsonObj.containsKey("tailConfig") && !jsonObj.isNull("tailConfig")) { + JsonObject tailConfigJson = jsonObj.getJsonObject("tailConfig"); + if (tailConfigJson != null) { + // Parse tailConfig fields + if (tailConfigJson.containsKey("lines")) { + tailConfig.put("lines", tailConfigJson.getInt("lines")); + } + if (tailConfigJson.containsKey("follow")) { + tailConfig.put("follow", tailConfigJson.getBoolean("follow")); + } + if (tailConfigJson.containsKey("since") && !tailConfigJson.isNull("since")) { + tailConfig.put("since", tailConfigJson.getString("since")); + } + if (tailConfigJson.containsKey("until") && !tailConfigJson.isNull("until")) { + tailConfig.put("until", tailConfigJson.getString("until")); + } + } + } + + LogSession session = new LogSession(sessionId, microserviceUuid, iofogUuid, tailConfig, status, agentConnected); + return session; + } catch (Exception e) { + logError("Error parsing log session from JSON", new AgentSystemException(e.getMessage(), e)); + return null; + } + } + + /** + * Handles log sessions changes + */ + private void handleLogSessions() { + logDebug("Start handling log sessions"); + + // Check if thread is already interrupted + if (Thread.currentThread().isInterrupted()) { + logWarning("Thread already interrupted before handling log sessions"); + return; + } + + try { + List sessions = fetchLogSessions(); + if (logSessionManager != null) { + logSessionManager.handleLogSessions(sessions); + } else { + logError("LogSessionManager is not initialized", new AgentSystemException("LogSessionManager is null", null)); + } + } catch (AgentSystemException e) { + // Check if it's an interruption (might be transient) + if (e.getMessage() != null && e.getMessage().contains("Request interrupted")) { + logWarning("Log session fetch was interrupted (may be transient): " + e.getMessage()); + // Don't reset changes flag for interruptions - allow 
retry on next change detection + } else { + logError("Unable to handle log sessions", e); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logWarning("Thread interrupted while handling log sessions: " + e.getMessage()); + // Don't reset changes flag for interruptions - allow retry on next change detection + } catch (Exception e) { + logError("Unable to handle log sessions", e); + } + logDebug("Finished handling log sessions"); + } } \ No newline at end of file diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/LogSession.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/LogSession.java new file mode 100644 index 0000000..9d9e2c4 --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/LogSession.java @@ -0,0 +1,105 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.field_agent; + +import java.util.Map; + +/** + * Represents a log session configuration from the controller + * + * @author Datasance + */ +public class LogSession { + private String sessionId; + private String microserviceUuid; // For microservice logs: UUID of the microservice that needs logging. Null for fog logs. + private String iofogUuid; // For fog/agent logs: UUID of the agent itself. Null for microservice logs. 
+ private Map tailConfig; // Configuration for log tailing (lines, follow, since, until) + private String status; // PENDING, ACTIVE + private boolean agentConnected; // Whether agent has connected to the WebSocket + + public LogSession() { + } + + public LogSession(String sessionId, String microserviceUuid, String iofogUuid, + Map tailConfig, String status, boolean agentConnected) { + this.sessionId = sessionId; + this.microserviceUuid = microserviceUuid; + this.iofogUuid = iofogUuid; + this.tailConfig = tailConfig; + this.status = status; + this.agentConnected = agentConnected; + } + + public String getSessionId() { + return sessionId; + } + + public void setSessionId(String sessionId) { + this.sessionId = sessionId; + } + + public String getMicroserviceUuid() { + return microserviceUuid; + } + + public void setMicroserviceUuid(String microserviceUuid) { + this.microserviceUuid = microserviceUuid; + } + + public String getIofogUuid() { + return iofogUuid; + } + + public void setIofogUuid(String iofogUuid) { + this.iofogUuid = iofogUuid; + } + + public Map getTailConfig() { + return tailConfig; + } + + public void setTailConfig(Map tailConfig) { + this.tailConfig = tailConfig; + } + + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + public boolean isAgentConnected() { + return agentConnected; + } + + public void setAgentConnected(boolean agentConnected) { + this.agentConnected = agentConnected; + } + + /** + * Check if this is a microservice log session + */ + public boolean isMicroserviceLog() { + return microserviceUuid != null && !microserviceUuid.isEmpty(); + } + + /** + * Check if this is a fog/agent log session + */ + public boolean isFogLog() { + return iofogUuid != null && !iofogUuid.isEmpty(); + } +} + diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/LogSessionManager.java 
b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/LogSessionManager.java new file mode 100644 index 0000000..76f20c8 --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/field_agent/LogSessionManager.java @@ -0,0 +1,459 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.field_agent; + +import org.eclipse.iofog.microservice.Microservice; +import org.eclipse.iofog.microservice.MicroserviceManager; +import org.eclipse.iofog.process_manager.DockerUtil; +import org.eclipse.iofog.process_manager.LogTailCallback; +import org.eclipse.iofog.utils.LocalLogReader; +import org.eclipse.iofog.utils.LogSessionWebSocketHandler; +import org.eclipse.iofog.utils.configuration.Configuration; +import org.eclipse.iofog.utils.logging.LoggingService; + +import java.util.*; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +/** + * Manages active log sessions for both microservice and fog logs + */ +public class LogSessionManager { + private static final String MODULE_NAME = "LogSessionManager"; + + // Track active sessions by sessionId + private final Map activeSessions = new ConcurrentHashMap<>(); + + // WebSocket handlers keyed by sessionId + private final Map webSocketHandlers = new ConcurrentHashMap<>(); + + // Docker tail callbacks for microservice logs, keyed by sessionId + private final Map dockerTailCallbacks = new ConcurrentHashMap<>(); + + // Local log readers for fog logs, keyed by sessionId + private final Map 
localLogReaders = new ConcurrentHashMap<>(); + + /** + * Information about an active log session + */ + private static class LogSessionInfo { + LogSession session; + String containerId; // For microservice logs + boolean isStreaming; + + LogSessionInfo(LogSession session) { + this.session = session; + this.isStreaming = false; + } + } + + /** + * Handle fetched log sessions from controller + * Compares with currently active sessions and starts/stops as needed + */ + public void handleLogSessions(List fetchedSessions) { + LoggingService.logDebug(MODULE_NAME, "Handling log sessions: fetched=" + fetchedSessions.size() + + ", active=" + activeSessions.size()); + + // Create set of fetched session IDs + Set fetchedSessionIds = fetchedSessions.stream() + .map(LogSession::getSessionId) + .collect(Collectors.toSet()); + + // Stop sessions that are no longer in fetched list + Set activeSessionIds = new HashSet<>(activeSessions.keySet()); + for (String sessionId : activeSessionIds) { + if (!fetchedSessionIds.contains(sessionId)) { + LoggingService.logInfo(MODULE_NAME, "Stopping session no longer in controller response: " + sessionId); + stopLogSession(sessionId); + } + } + + // Start new sessions or update existing ones + for (LogSession session : fetchedSessions) { + String sessionId = session.getSessionId(); + if (activeSessions.containsKey(sessionId)) { + // Update existing session if needed + LogSessionInfo info = activeSessions.get(sessionId); + if (!info.isStreaming && "ACTIVE".equals(session.getStatus())) { + // Session was pending, now active - start streaming + LoggingService.logInfo(MODULE_NAME, "Session became active, starting stream: " + sessionId); + startLogStreaming(sessionId); + } + } else { + // New session - start it + LoggingService.logInfo(MODULE_NAME, "Starting new log session: " + sessionId); + startLogSession(session); + } + } + } + + /** + * Start a log session + */ + public void startLogSession(LogSession session) { + String sessionId = 
session.getSessionId(); + LoggingService.logInfo(MODULE_NAME, "Starting log session: sessionId=" + sessionId + + ", microserviceUuid=" + session.getMicroserviceUuid() + + ", iofogUuid=" + session.getIofogUuid()); + + try { + // Create session info + LogSessionInfo info = new LogSessionInfo(session); + activeSessions.put(sessionId, info); + + // Create and connect WebSocket handler + LogSessionWebSocketHandler wsHandler = LogSessionWebSocketHandler.getInstance( + sessionId, + session.getMicroserviceUuid(), + session.getIofogUuid() + ); + webSocketHandlers.put(sessionId, wsHandler); + // Set LogSessionManager reference in handler so it can start tailing when ready + wsHandler.setLogSessionManager(this); + wsHandler.connect(); + + // DO NOT start streaming immediately - wait for LOG_START message from controller + // Streaming will be started when WebSocket becomes active (in handleLogStart) + if (!session.isMicroserviceLog() && !session.isFogLog()) { + LoggingService.logError(MODULE_NAME, "Invalid log session: neither microserviceUuid nor iofogUuid set", null); + stopLogSession(sessionId); + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error starting log session: " + sessionId, e); + stopLogSession(sessionId); + } + } + + /** + * Start microservice log streaming + */ + private void startMicroserviceLogStreaming(LogSession session, LogSessionInfo info) { + String sessionId = session.getSessionId(); + String microserviceUuid = session.getMicroserviceUuid(); + + LoggingService.logDebug(MODULE_NAME, "Starting microservice log streaming: sessionId=" + sessionId + + ", microserviceUuid=" + microserviceUuid); + + CompletableFuture.runAsync(() -> { + try { + // Get microservice to find container ID + Optional microserviceOpt = MicroserviceManager.getInstance() + .findLatestMicroserviceByUuid(microserviceUuid); + + if (!microserviceOpt.isPresent()) { + LoggingService.logWarning(MODULE_NAME, "Microservice not found: " + microserviceUuid + + ", will wait 
and retry"); + // Wait and retry similar to exec sessions + waitForMicroserviceAndStartLogging(session, info); + return; + } + + Microservice microservice = microserviceOpt.get(); + String containerId = microservice.getContainerId(); + + if (containerId == null || containerId.isEmpty()) { + LoggingService.logWarning(MODULE_NAME, "Container ID not available for microservice: " + microserviceUuid + + ", will wait and retry"); + waitForMicroserviceAndStartLogging(session, info); + return; + } + + info.containerId = containerId; + startDockerLogTailing(session, info, containerId); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error starting microservice log streaming: " + sessionId, e); + } + }); + } + + /** + * Wait for microservice to be ready and then start logging + */ + private void waitForMicroserviceAndStartLogging(LogSession session, LogSessionInfo info) { + String sessionId = session.getSessionId(); + String microserviceUuid = session.getMicroserviceUuid(); + int maxRetries = 30; // 30 retries + int retryDelayMs = 2000; // 2 seconds between retries + + CompletableFuture.runAsync(() -> { + for (int i = 0; i < maxRetries; i++) { + try { + Thread.sleep(retryDelayMs); + + // Check if session was stopped + if (!activeSessions.containsKey(sessionId)) { + LoggingService.logInfo(MODULE_NAME, "Session stopped while waiting for microservice: " + sessionId); + return; + } + + Optional microserviceOpt = MicroserviceManager.getInstance() + .findLatestMicroserviceByUuid(microserviceUuid); + + if (microserviceOpt.isPresent()) { + Microservice microservice = microserviceOpt.get(); + String containerId = microservice.getContainerId(); + + if (containerId != null && !containerId.isEmpty()) { + LoggingService.logInfo(MODULE_NAME, "Microservice ready, starting log streaming: " + sessionId); + info.containerId = containerId; + startDockerLogTailing(session, info, containerId); + return; + } + } + + LoggingService.logDebug(MODULE_NAME, "Waiting for microservice 
(attempt " + (i + 1) + "/" + maxRetries + + "): " + microserviceUuid); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LoggingService.logInfo(MODULE_NAME, "Interrupted while waiting for microservice: " + sessionId); + return; + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error while waiting for microservice: " + sessionId, e); + } + } + + LoggingService.logError(MODULE_NAME, "Timeout waiting for microservice: " + microserviceUuid, null); + }); + } + + /** + * Start Docker log tailing for a container + */ + private void startDockerLogTailing(LogSession session, LogSessionInfo info, String containerId) { + String sessionId = session.getSessionId(); + String microserviceUuid = session.getMicroserviceUuid(); + + try { + LoggingService.logInfo(MODULE_NAME, "Starting Docker log tailing: sessionId=" + sessionId + + ", containerId=" + containerId); + + // Create callback handler + LogTailCallback.LogTailHandler handler = new LogTailCallback.LogTailHandler() { + @Override + public void onLogLine(String sessionId, String microserviceUuid, byte[] lineBytes, com.github.dockerjava.api.model.StreamType streamType) { + LogSessionWebSocketHandler wsHandler = webSocketHandlers.get(sessionId); + if (wsHandler != null && lineBytes != null && lineBytes.length > 0) { + // Always send log line - WebSocket handler will buffer if not active + // This ensures no logs are lost during the activation period + wsHandler.sendLogLine(lineBytes); + } + } + + @Override + public void onComplete(String sessionId) { + LoggingService.logInfo(MODULE_NAME, "Docker log tailing completed: sessionId=" + sessionId); + info.isStreaming = false; + } + + @Override + public void onError(String sessionId, Throwable throwable) { + LoggingService.logError(MODULE_NAME, "Docker log tailing error: sessionId=" + sessionId, throwable); + info.isStreaming = false; + } + }; + + LogTailCallback callback = new LogTailCallback(sessionId, microserviceUuid, handler); + 
dockerTailCallbacks.put(sessionId, callback); + + // Start tailing + DockerUtil.getInstance().tailContainerLogs(containerId, callback, session.getTailConfig()); + info.isStreaming = true; + + LoggingService.logInfo(MODULE_NAME, "Docker log tailing started: sessionId=" + sessionId); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error starting Docker log tailing: sessionId=" + sessionId, e); + info.isStreaming = false; + } + } + + /** + * Start fog log streaming + */ + private void startFogLogStreaming(LogSession session, LogSessionInfo info) { + String sessionId = session.getSessionId(); + String iofogUuid = session.getIofogUuid(); + + LoggingService.logDebug(MODULE_NAME, "Starting fog log streaming: sessionId=" + sessionId + + ", iofogUuid=" + iofogUuid); + + try { + // Create local log reader handler + LocalLogReader.LocalLogHandler handler = new LocalLogReader.LocalLogHandler() { + @Override + public void onLogLine(String sessionId, String iofogUuid, String line) { + LogSessionWebSocketHandler wsHandler = webSocketHandlers.get(sessionId); + if (wsHandler != null && wsHandler.isActive()) { + // Send each log line as a separate message + byte[] lineBytes = line.getBytes(java.nio.charset.StandardCharsets.UTF_8); + wsHandler.sendLogLine(lineBytes); + } + } + + @Override + public void onComplete(String sessionId) { + LoggingService.logInfo(MODULE_NAME, "Local log reading completed: sessionId=" + sessionId); + info.isStreaming = false; + } + + @Override + public void onError(String sessionId, Throwable throwable) { + LoggingService.logError(MODULE_NAME, "Local log reading error: sessionId=" + sessionId, throwable); + info.isStreaming = false; + } + }; + + LocalLogReader reader = new LocalLogReader(sessionId, iofogUuid, session.getTailConfig(), handler); + localLogReaders.put(sessionId, reader); + reader.start(); + info.isStreaming = true; + + LoggingService.logInfo(MODULE_NAME, "Fog log streaming started: sessionId=" + sessionId); + } catch 
(Exception e) { + LoggingService.logError(MODULE_NAME, "Error starting fog log streaming: sessionId=" + sessionId, e); + info.isStreaming = false; + } + } + + /** + * Start log streaming for a session that became active + */ + private void startLogStreaming(String sessionId) { + LogSessionInfo info = activeSessions.get(sessionId); + if (info == null) { + LoggingService.logWarning(MODULE_NAME, "Session info not found: " + sessionId); + return; + } + + if (info.isStreaming) { + LoggingService.logDebug(MODULE_NAME, "Session already streaming: " + sessionId); + return; + } + + LogSession session = info.session; + if (session.isMicroserviceLog()) { + startMicroserviceLogStreaming(session, info); + } else if (session.isFogLog()) { + startFogLogStreaming(session, info); + } + } + + /** + * Stop a log session and cleanup resources + */ + public void stopLogSession(String sessionId) { + LoggingService.logInfo(MODULE_NAME, "Stopping log session: " + sessionId); + + // Stop Docker tail callback + LogTailCallback dockerCallback = dockerTailCallbacks.remove(sessionId); + if (dockerCallback != null) { + try { + dockerCallback.close(); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error closing Docker tail callback: " + sessionId, e); + } + } + + // Stop local log reader + LocalLogReader localReader = localLogReaders.remove(sessionId); + if (localReader != null) { + try { + localReader.stop(); + localReader.cleanup(); // Add explicit cleanup to release memory + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error stopping local log reader: " + sessionId, e); + } + } + + // Disconnect WebSocket handler + LogSessionWebSocketHandler wsHandler = webSocketHandlers.remove(sessionId); + if (wsHandler != null) { + try { + wsHandler.disconnect(); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error disconnecting WebSocket handler: " + sessionId, e); + } + } + + // Remove session info + activeSessions.remove(sessionId); + + 
LoggingService.logInfo(MODULE_NAME, "Stopped log session: " + sessionId); + } + + /** + * Cleanup stopped sessions + */ + public void cleanupStoppedSessions() { + // This is handled by handleLogSessions comparing with fetched sessions + // But we can also check for sessions that are no longer active + Set sessionIds = new HashSet<>(activeSessions.keySet()); + for (String sessionId : sessionIds) { + LogSessionInfo info = activeSessions.get(sessionId); + if (info != null && !info.isStreaming) { + // Check if WebSocket is still connected + LogSessionWebSocketHandler wsHandler = webSocketHandlers.get(sessionId); + if (wsHandler == null || !wsHandler.isConnected()) { + LoggingService.logDebug(MODULE_NAME, "Cleaning up stopped session: " + sessionId); + stopLogSession(sessionId); + } + } + } + } + + /** + * Get active session count + */ + public int getActiveSessionCount() { + return activeSessions.size(); + } + + /** + * Start log streaming when WebSocket is activated (after LOG_START received) + * This method is called from LogSessionWebSocketHandler when it receives LOG_START + * + * @param sessionId - Session ID + * @param tailConfig - Tail configuration from LOG_START message (may override session config) + */ + public void startLogStreamingOnActivation(String sessionId, Map tailConfig) { + LogSessionInfo info = activeSessions.get(sessionId); + if (info == null) { + LoggingService.logWarning(MODULE_NAME, "Session info not found for activation: " + sessionId); + return; + } + + if (info.isStreaming) { + LoggingService.logDebug(MODULE_NAME, "Session already streaming: " + sessionId); + return; + } + + LogSession session = info.session; + LoggingService.logInfo(MODULE_NAME, "Starting log streaming on WebSocket activation: sessionId=" + sessionId); + + // Update session with tailConfig from LOG_START if provided + if (tailConfig != null && !tailConfig.isEmpty()) { + session.setTailConfig(tailConfig); + LoggingService.logDebug(MODULE_NAME, "Updated session tailConfig 
from LOG_START: " + sessionId); + } + + if (session.isMicroserviceLog()) { + startMicroserviceLogStreaming(session, info); + } else if (session.isFogLog()) { + startFogLogStreaming(session, info); + } else { + LoggingService.logError(MODULE_NAME, "Invalid log session type for activation: " + sessionId, null); + } + } +} + diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsManager.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsManager.java index 26d41a2..10b372a 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsManager.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsManager.java @@ -58,7 +58,7 @@ public String getModuleName() { } /** - * Start GPS module in AUTO mode by default + * Start GPS module based on configured mode */ public void start() { if (isRunning) { @@ -68,14 +68,27 @@ public void start() { try { LoggingService.logInfo(MODULE_NAME, "Starting GPS Manager"); - // Initialize in AUTO mode by default - initializeAutoMode(); - - // Start coordinate update scheduler + // Start coordinate update scheduler first (non-blocking) startCoordinateUpdateScheduler(); + // Mark as running immediately to avoid blocking startup isRunning = true; LoggingService.logInfo(MODULE_NAME, "GPS Manager started successfully"); + + // Initialize GPS coordinates asynchronously to avoid blocking startup + // This prevents DNS resolution hangs from blocking the main thread + // Initialize based on configured mode, not always AUTO + scheduler.execute(() -> { + try { + LoggingService.logDebug(MODULE_NAME, "Initializing GPS coordinates in background"); + initializeGps(); + LoggingService.logDebug(MODULE_NAME, "GPS coordinates initialization completed"); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error initializing GPS in background", e); + status.setHealthStatus(GpsStatus.GpsHealthStatus.IP_ERROR); + startOffMode(); + } + }); } catch (Exception e) { LoggingService.logError(MODULE_NAME, 
"Error starting GPS Manager", e); stop(); @@ -141,6 +154,9 @@ public void instanceConfigUpdated() { startOffMode(); } + // Update scheduler frequency (handles changes from 0 to positive or vice versa) + changeGpsScanFrequencyInterval(); + LoggingService.logDebug(MODULE_NAME, "GPS configuration update completed"); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error handling GPS configuration update", e); @@ -154,6 +170,40 @@ public GpsStatus getStatus() { return status; } + /** + * Initialize GPS based on configured mode + * This method respects the configured mode and doesn't overwrite manual coordinates + */ + private void initializeGps() { + try { + GpsMode currentMode = Configuration.getGpsMode(); + String gpsDevice = Configuration.getGpsDevice(); + + LoggingService.logDebug(MODULE_NAME, "Initializing GPS in mode: " + currentMode); + + // Handle mode initialization based on configured mode + if (currentMode == GpsMode.DYNAMIC && gpsDevice != null && !gpsDevice.isEmpty()) { + startDynamicMode(); + } else if (currentMode == GpsMode.AUTO) { + initializeAutoMode(); + } else if (currentMode == GpsMode.DYNAMIC && (gpsDevice == null || gpsDevice.isEmpty())) { + startManualMode(); + } else if (currentMode == GpsMode.MANUAL) { + startManualMode(); + } else if (currentMode == GpsMode.OFF) { + startOffMode(); + } else { + // Default to AUTO if mode is not set or invalid + LoggingService.logWarning(MODULE_NAME, "GPS mode not configured, defaulting to AUTO"); + initializeAutoMode(); + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error initializing GPS", e); + status.setHealthStatus(GpsStatus.GpsHealthStatus.IP_ERROR); + startOffMode(); + } + } + /** * Initialize in AUTO mode */ @@ -290,13 +340,18 @@ private void startOffMode() { private void startCoordinateUpdateScheduler() { try { long scanFrequency = Configuration.getGpsScanFrequency(); - coordinateUpdateTask = scheduler.scheduleAtFixedRate( - this::updateCoordinates, - 0, - 
scanFrequency, - TimeUnit.SECONDS - ); - LoggingService.logDebug(MODULE_NAME, "Started coordinate update scheduler with frequency: " + scanFrequency + " seconds"); + // Only schedule if frequency is positive (like DockerPruningManager) + if (scanFrequency > 0) { + coordinateUpdateTask = scheduler.scheduleAtFixedRate( + this::updateCoordinates, + 0, + scanFrequency, + TimeUnit.SECONDS + ); + LoggingService.logDebug(MODULE_NAME, "Started coordinate update scheduler with frequency: " + scanFrequency + " seconds"); + } else { + LoggingService.logDebug(MODULE_NAME, "GPS scan frequency is 0 - coordinate update scheduler not started"); + } } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error starting coordinate update scheduler", e); } @@ -376,6 +431,32 @@ private void updateAutoCoordinates() { } } + /** + * Update GPS scan frequency interval + * This method will reschedule the coordinate update scheduler with the new frequency + * Similar to DockerPruningManager.changePruningFreqInterval() + */ + public void changeGpsScanFrequencyInterval() { + // Cancel existing task if running + if (coordinateUpdateTask != null) { + coordinateUpdateTask.cancel(true); + coordinateUpdateTask = null; + } + + long scanFrequency = Configuration.getGpsScanFrequency(); + if (scanFrequency > 0) { + coordinateUpdateTask = scheduler.scheduleAtFixedRate( + this::updateCoordinates, + 0, + scanFrequency, + TimeUnit.SECONDS + ); + LoggingService.logInfo(MODULE_NAME, "GPS scan frequency updated to: " + scanFrequency + " seconds"); + } else { + LoggingService.logInfo(MODULE_NAME, "GPS scan frequency set to 0 - coordinate update scheduler disabled"); + } + } + /** * Check if GPS module is running */ diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsWebHandler.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsWebHandler.java index 8ca6bcd..952aa83 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsWebHandler.java +++ 
b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/gps/GpsWebHandler.java @@ -20,12 +20,19 @@ import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.eclipse.iofog.utils.logging.LoggingService.logError; public class GpsWebHandler { private static final String MODULE_NAME = "GPS Web Handler"; + private static final int DNS_RESOLUTION_TIMEOUT_SECONDS = 10; // Total timeout including DNS resolution + private static final ExecutorService executorService = Executors.newCachedThreadPool(); /** * gets GPS coordinates by external ip from http://ip-api.com/ @@ -72,14 +79,25 @@ public static String getExternalIp() { * @return JsonObject */ private static JsonObject getGeolocationData() throws Exception { - URL url = new URL("http://ip-api.com/json"); - HttpURLConnection connection = (HttpURLConnection) url.openConnection(); - connection.setConnectTimeout(3000); // 3 seconds - connection.setReadTimeout(3000); // 3 seconds - BufferedReader ipReader = new BufferedReader( - new InputStreamReader(connection.getInputStream())); - JsonReader jsonReader = Json.createReader(ipReader); - return jsonReader.readObject(); + // Wrap the HTTP request in a Future with timeout to handle DNS resolution hangs + Future future = executorService.submit(() -> { + URL url = new URL("http://ip-api.com/json"); + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); + connection.setConnectTimeout(3000); // 3 seconds + connection.setReadTimeout(3000); // 3 seconds + BufferedReader ipReader = new BufferedReader( + new InputStreamReader(connection.getInputStream())); + JsonReader jsonReader = Json.createReader(ipReader); + return jsonReader.readObject(); + }); + + try { + // Wait for the request with a total timeout that 
includes DNS resolution + return future.get(DNS_RESOLUTION_TIMEOUT_SECONDS, TimeUnit.SECONDS); + } catch (TimeoutException e) { + future.cancel(true); + throw new Exception("Timeout while getting geolocation data (DNS resolution or connection timeout)", e); + } } } diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/Microservice.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/Microservice.java index 8b91869..f7e377b 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/Microservice.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/Microservice.java @@ -59,6 +59,7 @@ public class Microservice { private boolean deleteWithCleanup; private boolean isStuckInRestart; private Healthcheck healthcheck; + private ServiceAccount serviceAccount; public Microservice(String microserviceUuid, String imageName) { this.microserviceUuid = microserviceUuid; @@ -347,4 +348,12 @@ public void setMemoryLimit(Long memoryLimitMB) { // Convert MB to bytes (1 MB = 1024 * 1024 bytes) this.memoryLimit = memoryLimitMB != null ? memoryLimitMB * 1024 * 1024 : null; } + + public ServiceAccount getServiceAccount() { + return serviceAccount; + } + + public void setServiceAccount(ServiceAccount serviceAccount) { + this.serviceAccount = serviceAccount; + } } diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/RoleRef.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/RoleRef.java new file mode 100644 index 0000000..2aa8e85 --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/RoleRef.java @@ -0,0 +1,55 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.microservice; + +import java.util.Objects; + +public class RoleRef { + private final String kind; + private final String name; + + public RoleRef(String kind, String name) { + this.kind = kind; + this.name = name; + } + + public String getKind() { + return kind; + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RoleRef roleRef = (RoleRef) o; + return Objects.equals(kind, roleRef.kind) && + Objects.equals(name, roleRef.name); + } + + @Override + public int hashCode() { + return Objects.hash(kind, name); + } + + @Override + public String toString() { + return "RoleRef{" + + "kind='" + kind + '\'' + + ", name='" + name + '\'' + + '}'; + } +} diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/Rule.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/Rule.java new file mode 100644 index 0000000..995f00e --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/Rule.java @@ -0,0 +1,64 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.microservice; + +import java.util.List; +import java.util.Objects; + +public class Rule { + private final List apiGroups; + private final List resources; + private final List verbs; + + public Rule(List apiGroups, List resources, List verbs) { + this.apiGroups = apiGroups; + this.resources = resources; + this.verbs = verbs; + } + + public List getApiGroups() { + return apiGroups; + } + + public List getResources() { + return resources; + } + + public List getVerbs() { + return verbs; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Rule rule = (Rule) o; + return Objects.equals(apiGroups, rule.apiGroups) && + Objects.equals(resources, rule.resources) && + Objects.equals(verbs, rule.verbs); + } + + @Override + public int hashCode() { + return Objects.hash(apiGroups, resources, verbs); + } + + @Override + public String toString() { + return "Rule{" + + "apiGroups=" + apiGroups + + ", resources=" + resources + + ", verbs=" + verbs + + '}'; + } +} diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/ServiceAccount.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/ServiceAccount.java new file mode 100644 index 0000000..b5107a1 --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/ServiceAccount.java @@ -0,0 +1,64 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.microservice; + +import java.util.List; +import java.util.Objects; + +public class ServiceAccount { + private final String name; + private final RoleRef roleRef; + private final List rules; + + public ServiceAccount(String name, RoleRef roleRef, List rules) { + this.name = name; + this.roleRef = roleRef; + this.rules = rules; + } + + public String getName() { + return name; + } + + public RoleRef getRoleRef() { + return roleRef; + } + + public List getRules() { + return rules; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ServiceAccount that = (ServiceAccount) o; + return Objects.equals(name, that.name) && + Objects.equals(roleRef, that.roleRef) && + Objects.equals(rules, that.rules); + } + + @Override + public int hashCode() { + return Objects.hash(name, roleRef, rules); + } + + @Override + public String toString() { + return "ServiceAccount{" + + "name='" + name + '\'' + + ", roleRef=" + roleRef + + ", rules=" + rules + + '}'; + } +} diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/VolumeMappingType.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/VolumeMappingType.java index 8a3d45a..3e1fd0f 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/VolumeMappingType.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/microservice/VolumeMappingType.java @@ -2,5 +2,6 @@ public enum VolumeMappingType { VOLUME, - BIND + BIND, + VOLUME_MOUNT // New type for volume mounts } diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/ContainerManager.java 
b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/ContainerManager.java index dbf7b61..5a3554e 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/ContainerManager.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/ContainerManager.java @@ -17,6 +17,7 @@ import com.github.dockerjava.api.model.Container; import com.github.dockerjava.api.model.Image; import org.eclipse.iofog.microservice.*; +import org.eclipse.iofog.volume_mount.VolumeMountManager; import org.eclipse.iofog.exception.AgentSystemException; import org.eclipse.iofog.network.IOFogNetworkInterfaceManager; import org.eclipse.iofog.status_reporter.StatusReporter; @@ -236,6 +237,13 @@ private void removeContainerByMicroserviceUuid(String microserviceUuid, boolean setMicroserviceStatus(microserviceUuid, MicroserviceState.DELETED); } } + // Clean up per-microservice volume mounts + try { + VolumeMountManager.getInstance().cleanupMicroserviceVolumes(microserviceUuid); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Error cleaning up microservice volumes: " + e.getMessage()); + // Continue with container removal even if cleanup fails + } LoggingService.logInfo(MODULE_NAME, "Finished remove container with microserviceuuid : " + microserviceUuid); } diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/DockerUtil.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/DockerUtil.java index 5513130..10ae62c 100755 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/DockerUtil.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/DockerUtil.java @@ -35,6 +35,8 @@ import org.eclipse.iofog.exception.AgentSystemException; import org.eclipse.iofog.exception.AgentUserException; import org.eclipse.iofog.microservice.*; +import org.eclipse.iofog.volume_mount.VolumeMountManager; +import org.eclipse.iofog.volume_mount.VolumeMountType; import 
org.eclipse.iofog.status_reporter.StatusReporter; import org.eclipse.iofog.utils.Constants; import org.eclipse.iofog.utils.configuration.Configuration; @@ -113,8 +115,8 @@ private void initDockerClient() { .withDockerHttpClient(httpClient) .build(); - // Ensure pot network exists during initialization - ensurePotNetworkExists(); + // Ensure namespace network exists during initialization + ensureNamespaceNetworkExists(); } catch (Exception e) { logError(MODULE_NAME,"Docker client initialization failed", new AgentUserException(e.getMessage(), e)); @@ -786,13 +788,30 @@ public String createContainer(Microservice microservice, String host) throws Not } // Resolve host destination for volume mounts - String resolvedHostDestination = resolveVolumeMountPath(volumeMapping.getHostDestination()); - - Mount mount = (new Mount()) + String resolvedHostDestination = resolveVolumeMountPath( + volumeMapping.getHostDestination(), + volumeMapping.getType(), + microservice.getMicroserviceUuid()); + + Mount mount; + if (volumeMapping.getType() == VolumeMappingType.VOLUME_MOUNT) { + // Unified bind mount approach for both secrets and configMaps + // If mounting a specific file (key specified in hostDestination), Docker will mount it as a file + // If mounting entire directory (no key), Docker will mount it as a directory + mount = (new Mount()) + .withSource(resolvedHostDestination) + .withType(MountType.BIND) + .withTarget(volumeMapping.getContainerDestination()) + .withReadOnly(isReadOnly); + // Security is provided by file permissions (600 for secrets, 644 for configMaps) + } else { + // Existing logic for BIND and VOLUME types + mount = (new Mount()) .withSource(resolvedHostDestination) .withType(volumeMapping.getType() == VolumeMappingType.BIND ? 
MountType.BIND : MountType.VOLUME) .withTarget(volumeMapping.getContainerDestination()) .withReadOnly(isReadOnly); + } volumeMounts.add(mount); }); } @@ -856,7 +875,7 @@ public String createContainer(Microservice microservice, String host) throws Not logFiles = (int) (microservice.getLogSize() / 2); containerLogConfig.put("max-file", String.valueOf(logFiles)); - containerLogConfig.put("max-size", "2m"); + containerLogConfig.put("max-size", "100m"); LogConfig containerLog = new LogConfig(LogConfig.LoggingType.DEFAULT, containerLogConfig); List envVars = new ArrayList<>(Arrays.asList("SELFNAME=" + microservice.getMicroserviceUuid())); @@ -937,13 +956,13 @@ public String createContainer(Microservice microservice, String host) throws Not hostConfig.withNetworkMode("host"); } } else if(hosts.length > 0) { - hostConfig.withNetworkMode("pot").withExtraHosts(hosts); + hostConfig.withNetworkMode(getNamespaceNetworkName()).withExtraHosts(hosts); } } else if (SystemUtils.IS_OS_LINUX || SystemUtils.IS_OS_MAC) { if(microservice.isHostNetworkMode()){ hostConfig.withNetworkMode("host"); } else if(hosts.length > 0) { - hostConfig.withNetworkMode("pot").withExtraHosts(hosts); + hostConfig.withNetworkMode(getNamespaceNetworkName()).withExtraHosts(hosts); } } @@ -1121,31 +1140,56 @@ public List getRunningNonIofogContainers() { } /** - * Checks if the "pot" network exists and creates it if it doesn't - * @return true if network exists or was created successfully, false otherwise + * Checks if the "namespace" network exists and creates it if it doesn't + * Skips creation if namespace is not set yet (will be set after provisioning) + * @return true if network exists or was created successfully, or if namespace not set yet; false on error */ - private boolean ensurePotNetworkExists() { + public boolean ensureNamespaceNetworkExists() { try { + String namespace = Configuration.getNamespace(); + + // Skip network creation if namespace is not set yet (will be set after provisioning) + if 
(namespace == null || namespace.trim().isEmpty()) { + LoggingService.logDebug(MODULE_NAME, "Namespace not set yet, skipping network creation. Network will be created after provisioning."); + return true; // Return true to not fail initialization + } + + // Prefix network name to avoid conflicts with reserved Docker network names + String networkName = getNamespaceNetworkName(); + List networks = dockerClient.listNetworksCmd().exec(); - boolean potNetworkExists = networks.stream() - .anyMatch(network -> "pot".equals(network.getName())); + boolean namespaceNetworkExists = networks.stream() + .anyMatch(network -> networkName.equals(network.getName())); - if (!potNetworkExists) { - LoggingService.logInfo(MODULE_NAME, "Creating 'pot' bridge network"); + if (!namespaceNetworkExists) { + LoggingService.logInfo(MODULE_NAME, "Creating namespace bridge network: " + networkName); dockerClient.createNetworkCmd() - .withName("pot") + .withName(networkName) .withDriver("bridge") .exec(); - LoggingService.logInfo(MODULE_NAME, "Successfully created 'pot' bridge network"); + LoggingService.logInfo(MODULE_NAME, "Successfully created namespace bridge network: " + networkName); } return true; } catch (Exception e) { - LoggingService.logError(MODULE_NAME, "Failed to ensure 'pot' network exists", + LoggingService.logError(MODULE_NAME, "Failed to ensure 'namespace' network exists", new AgentSystemException(e.getMessage(), e)); return false; } } + /** + * Gets the prefixed network name for the current namespace + * Prefixes with "iofog-" to avoid conflicts with reserved Docker network names + * @return Prefixed network name (e.g., "iofog-default", "iofog-production") + */ + public static String getNamespaceNetworkName() { + String namespace = Configuration.getNamespace(); + if (namespace == null || namespace.trim().isEmpty()) { + return "iofog-default"; + } + return "iofog-" + namespace; + } + /** * Gets the IP address of the router microservice container * @return IP address of the 
router microservice container or null if not found @@ -1194,49 +1238,118 @@ private Map parseAnnotationsString(String annotationsString) { return annotationsMap; } /** - * Resolves volume mount paths that start with $VolumeMount prefix - * @param hostDestination The host destination path from volume mapping + * Resolves volume mount paths for VOLUME_MOUNT type + * @param hostDestination The host destination (volume mount name for VOLUME_MOUNT type) + * @param volumeMappingType The volume mapping type + * @param microserviceUuid The microservice UUID * @return Resolved host destination path */ - private String resolveVolumeMountPath(String hostDestination) { - // Check if this is a volume mount reference - if (!hostDestination.startsWith("$VolumeMount/")) { - return hostDestination; // Return as-is if not a volume mount + private String resolveVolumeMountPath(String hostDestination, VolumeMappingType volumeMappingType, String microserviceUuid) { + // Handle new VOLUME_MOUNT type + if (volumeMappingType == VolumeMappingType.VOLUME_MOUNT) { + // Parse hostDestination to extract volume name and optional key + // Format: "volume-name" or "volume-name/key-name" + String volumeName; + String keyName = null; + + int slashIndex = hostDestination.indexOf('/'); + if (slashIndex > 0) { + // Key is specified: "volume-name/key-name" + volumeName = hostDestination.substring(0, slashIndex); + keyName = hostDestination.substring(slashIndex + 1); + } else { + // No key specified: "volume-name" (mount entire directory) + volumeName = hostDestination; + } + + // Look up volume mount type from cache (O(1) lookup) + VolumeMountManager volumeMountManager = VolumeMountManager.getInstance(); + VolumeMountType volumeMountType = volumeMountManager.getVolumeMountType(volumeName); + + if (volumeMountType == null) { + LoggingService.logWarning(MODULE_NAME, + "Volume mount type not found for: " + volumeName + ", defaulting to SECRET"); + volumeMountType = VolumeMountType.SECRET; + } + + // 
Prepare per-microservice mount point + String mountPath = volumeMountManager.prepareMicroserviceVolumeMount( + microserviceUuid, volumeName, volumeMountType); + + // If key is specified, append it to mount path to point to specific file + if (keyName != null) { + mountPath = mountPath + "/" + keyName; + } + + // Check if agent is running in container + String iofogDaemon = System.getenv("IOFOG_DAEMON"); + boolean isContainer = "container".equals(iofogDaemon != null ? iofogDaemon.toLowerCase() : null); + + if (isContainer) { + // Agent running in container - need to check volume mounting + try { + // Check if iofog-agent-directory volume exists + List volumes = dockerClient.listVolumesCmd().exec().getVolumes(); + boolean volumeExists = volumes.stream() + .anyMatch(vol -> "iofog-agent-directory".equals(vol.getName())); + + if (volumeExists) { + // Volume exists - inspect it to get mount point + InspectVolumeResponse volumeInfo = dockerClient.inspectVolumeCmd("iofog-agent-directory").exec(); + String mountPoint = volumeInfo.getMountpoint(); + // Convert absolute path to relative path within volume + String diskDir = Configuration.getDiskDirectory(); + if (mountPath.startsWith(diskDir)) { + return mountPoint + mountPath.substring(diskDir.length()); + } + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error checking volume mount, using direct path: " + e.getMessage()); + } + } + + return mountPath; } - // Extract the volume name from $VolumeMount/name - String volumeName = hostDestination.substring("$VolumeMount/".length()); - - // Check if agent is running in container - String iofogDaemon = System.getenv("IOFOG_DAEMON"); - boolean isContainer = "container".equals(iofogDaemon != null ? 
iofogDaemon.toLowerCase() : null); - - if (!isContainer) { - // Agent running on host - use disk directory directly - return Configuration.getDiskDirectory() + "volumes/" + volumeName; - } else { - // Agent running in container - need to check volume mounting - try { - // Check if iofog-agent-directory volume exists - List volumes = dockerClient.listVolumesCmd().exec().getVolumes(); - boolean volumeExists = volumes.stream() - .anyMatch(vol -> "iofog-agent-directory".equals(vol.getName())); - - if (volumeExists) { - // Volume exists - inspect it to get mount point - InspectVolumeResponse volumeInfo = dockerClient.inspectVolumeCmd("iofog-agent-directory").exec(); - String mountPoint = volumeInfo.getMountpoint(); - return mountPoint + "/volumes/" + volumeName; - } else { - // Volume doesn't exist - assume bind mount, use disk directory + // Legacy handling for $VolumeMount/ prefix (backward compatibility) + if (hostDestination.startsWith("$VolumeMount/")) { + String volumeName = hostDestination.substring("$VolumeMount/".length()); + + // Check if agent is running in container + String iofogDaemon = System.getenv("IOFOG_DAEMON"); + boolean isContainer = "container".equals(iofogDaemon != null ? 
iofogDaemon.toLowerCase() : null); + + if (!isContainer) { + // Agent running on host - use disk directory directly + return Configuration.getDiskDirectory() + "volumes/" + volumeName; + } else { + // Agent running in container - need to check volume mounting + try { + // Check if iofog-agent-directory volume exists + List volumes = dockerClient.listVolumesCmd().exec().getVolumes(); + boolean volumeExists = volumes.stream() + .anyMatch(vol -> "iofog-agent-directory".equals(vol.getName())); + + if (volumeExists) { + // Volume exists - inspect it to get mount point + InspectVolumeResponse volumeInfo = dockerClient.inspectVolumeCmd("iofog-agent-directory").exec(); + String mountPoint = volumeInfo.getMountpoint(); + return mountPoint + "/volumes/" + volumeName; + } else { + // Volume doesn't exist - assume bind mount, use disk directory + return Configuration.getDiskDirectory() + "volumes/" + volumeName; + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error checking volume mount, falling back to disk directory: " + e.getMessage()); return Configuration.getDiskDirectory() + "volumes/" + volumeName; } - } catch (Exception e) { - LoggingService.logWarning(MODULE_NAME, - "Error checking volume mount, falling back to disk directory: " + e.getMessage()); - return Configuration.getDiskDirectory() + "volumes/" + volumeName; } } + + // Return as-is for BIND and VOLUME types + return hostDestination; } class ItemStatus { @@ -1377,6 +1490,102 @@ public void killExecSession(String execId) throws Exception { } } + /** + * Tails container logs using Docker API + * + * @param containerId - ID of the container + * @param callback - Callback to handle log frames + * @param tailConfig - Configuration for log tailing (lines, follow, since, until) + * @throws Exception if log tailing fails + */ + public void tailContainerLogs(String containerId, LogTailCallback callback, Map tailConfig) throws Exception { + LoggingService.logInfo(MODULE_NAME, "Starting to tail 
container logs: containerId=" + containerId); + try { + // Parse tail config with defaults + boolean follow = tailConfig != null && tailConfig.containsKey("follow") + ? (Boolean) tailConfig.get("follow") : true; + Integer tailLines = tailConfig != null && tailConfig.containsKey("lines") + ? ((Number) tailConfig.get("lines")).intValue() : 100; + String since = tailConfig != null && tailConfig.containsKey("since") + ? (String) tailConfig.get("since") : null; + String until = tailConfig != null && tailConfig.containsKey("until") + ? (String) tailConfig.get("until") : null; + + // Validate tail lines (1-10000) + if (tailLines != null && tailLines < 1) tailLines = 100; + if (tailLines != null && tailLines > 10000) tailLines = 10000; + + LoggingService.logDebug(MODULE_NAME, "Tail config: follow=" + follow + + ", lines=" + tailLines + + ", since=" + since + + ", until=" + until); + + // Build log container command - use imported LogContainerCmd + LogContainerCmd logCmd = dockerClient.logContainerCmd(containerId) + .withStdOut(true) // CRITICAL: Must be true to get stdout logs + .withStdErr(true) // CRITICAL: Must be true to get stderr logs + .withTimestamps(false); + + // Optimize parameter order and usage based on docker-java best practices: + // 1. Set tail/tailAll first for better performance + // 2. When since is provided WITHOUT follow, don't limit with tail - get all logs from since + // 3. When since is provided WITH follow, use tail to limit initial lines + // 4. 
Use withTailAll() when tailLines is null or very large + + boolean useTailAll = (tailLines == null || tailLines >= 10000); + boolean hasSince = (since != null && !since.isEmpty()); + + if (hasSince && !follow) { + // When since is set without follow, get all logs from that timestamp + // Don't use tail to ensure we get all matching logs + LoggingService.logDebug(MODULE_NAME, "Using 'since' without follow - getting all logs from timestamp"); + } else if (useTailAll) { + // Use withTailAll() when tail is very large or not specified + logCmd.withTailAll(); + LoggingService.logDebug(MODULE_NAME, "Using withTailAll() - getting all available logs"); + } else if (tailLines != null) { + // Use withTail() for specific number of lines + logCmd.withTail(tailLines); + LoggingService.logDebug(MODULE_NAME, "Using withTail(" + tailLines + ") - getting last " + tailLines + " lines"); + } + + // Set since parameter (if provided) + if (hasSince) { + try { + // Parse ISO 8601 timestamp to Unix timestamp (seconds since epoch) + java.time.Instant instant = java.time.Instant.parse(since); + long unixTimestamp = instant.getEpochSecond(); + logCmd.withSince((int) unixTimestamp); + LoggingService.logDebug(MODULE_NAME, "Using 'since' timestamp: " + since + " -> " + unixTimestamp); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Invalid since timestamp format: " + since + " - " + e.getMessage()); + } + } + + // Add until if provided + if (until != null && !until.isEmpty()) { + try { + java.time.Instant instant = java.time.Instant.parse(until); + long unixTimestamp = instant.getEpochSecond(); + logCmd.withUntil((int) unixTimestamp); + LoggingService.logDebug(MODULE_NAME, "Parsed until timestamp: " + until + " -> " + unixTimestamp); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Invalid until timestamp format: " + until + " - " + e.getMessage()); + } + } + + // Set follow last (after all other parameters) + logCmd.withFollowStream(follow); + + // 
Execute log command with callback + logCmd.exec(callback); + LoggingService.logInfo(MODULE_NAME, "Started tailing container logs: containerId=" + containerId); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error tailing container logs: containerId=" + containerId, e); + throw e; + } + } + /** * Callback class for handling exec session output and managing timeouts */ diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/LogTailCallback.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/LogTailCallback.java new file mode 100644 index 0000000..83d871e --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/process_manager/LogTailCallback.java @@ -0,0 +1,177 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
/*
 * *******************************************************************************
 *  * Copyright (c) 2023 Datasance Teknoloji A.S.
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Eclipse Public License v. 2.0 which is available at
 *  * http://www.eclipse.org/legal/epl-2.0
 *  *
 *  * SPDX-License-Identifier: EPL-2.0
 *  *******************************************************************************
 *
 */
package org.eclipse.iofog.process_manager;

import com.github.dockerjava.api.async.ResultCallbackTemplate;
import com.github.dockerjava.api.model.Frame;
import com.github.dockerjava.api.model.StreamType;
import org.eclipse.iofog.utils.logging.LoggingService;

import java.io.ByteArrayOutputStream;
import java.io.Closeable;

/**
 * Callback for handling Docker container log tailing.
 *
 * Docker delivers log output as arbitrary frames: one frame may contain several
 * log lines, or only part of a line (and even part of a multi-byte UTF-8
 * character). This callback therefore buffers RAW BYTES between frames and
 * only hands complete newline-delimited lines to the {@link LogTailHandler}.
 * Buffering bytes (rather than decoding each frame to a String first) is what
 * prevents UTF-8 characters split across two frames from being corrupted.
 *
 * Not thread-safe beyond what docker-java guarantees: onNext/onComplete/onError
 * are assumed to be invoked sequentially by the docker-java event loop.
 */
public class LogTailCallback extends ResultCallbackTemplate<LogTailCallback, Frame> implements Closeable {
    private static final String MODULE_NAME = "LogTailCallback";

    private final String sessionId;
    private final String microserviceUuid;
    private final LogTailHandler handler;
    private volatile boolean isRunning = true;

    // Raw bytes of the current (possibly partial) line, carried across frames.
    private final ByteArrayOutputStream lineBuffer = new ByteArrayOutputStream();
    // Stream type of the most recent frame; used when flushing a partial line.
    private StreamType bufferedStreamType = StreamType.STDOUT;

    public LogTailCallback(String sessionId, String microserviceUuid, LogTailHandler handler) {
        this.sessionId = sessionId;
        this.microserviceUuid = microserviceUuid;
        this.handler = handler;
        LoggingService.logDebug(MODULE_NAME, "Created LogTailCallback: sessionId=" + sessionId + ", microserviceUuid=" + microserviceUuid);
    }

    @Override
    public void onNext(Frame frame) {
        if (!isRunning) {
            return;
        }

        try {
            byte[] payload = frame.getPayload();
            if (payload == null || payload.length == 0) {
                return;
            }

            StreamType streamType = frame.getStreamType();
            LoggingService.logDebug(MODULE_NAME,
                    "Received log frame: sessionId=" + sessionId
                            + ", length=" + payload.length
                            + ", streamType=" + streamType);

            // Remember the stream type for any partial line left in the buffer
            // (last frame's type wins for a line spanning frames).
            bufferedStreamType = streamType;

            // Scan the raw bytes for newline delimiters. A fix over the previous
            // implementation, which decoded each frame with new String(payload, UTF_8)
            // before buffering: that corrupted multi-byte UTF-8 characters that were
            // split across two frames.
            for (byte b : payload) {
                if (b == (byte) '\n') {
                    // Complete line found (may be empty - empty lines are valid).
                    emitLine(lineBuffer.toByteArray(), streamType);
                    lineBuffer.reset();
                } else {
                    lineBuffer.write(b);
                }
            }
            // Any bytes without a trailing newline stay buffered until the next
            // frame (or until flushBuffer on completion/close).
        } catch (Exception e) {
            LoggingService.logError(MODULE_NAME, "Error processing log frame", e);
        }
    }

    /**
     * Deliver one complete log line (without its trailing newline) to the handler.
     */
    private void emitLine(byte[] lineBytes, StreamType streamType) {
        if (handler != null) {
            // Empty lines are forwarded too - they are valid log entries.
            handler.onLogLine(sessionId, microserviceUuid, lineBytes, streamType);
        }
    }

    /**
     * Flush any remaining buffered partial line (called on close/complete/error).
     */
    private void flushBuffer() {
        if (lineBuffer.size() > 0) {
            LoggingService.logDebug(MODULE_NAME, "Flushing final partial line: length="
                    + lineBuffer.size() + ", streamType=" + bufferedStreamType);
            emitLine(lineBuffer.toByteArray(), bufferedStreamType);
            lineBuffer.reset();
        }
    }

    @Override
    public void onComplete() {
        LoggingService.logInfo(MODULE_NAME, "Log tailing completed: sessionId=" + sessionId);
        isRunning = false;
        flushBuffer(); // Send any remaining buffered content
        if (handler != null) {
            handler.onComplete(sessionId);
        }
    }

    @Override
    public void onError(Throwable throwable) {
        LoggingService.logError(MODULE_NAME, "Log tailing error: sessionId=" + sessionId, throwable);
        isRunning = false;
        flushBuffer(); // Try to send any remaining content even on error
        if (handler != null) {
            handler.onError(sessionId, throwable);
        }
    }

    @Override
    public void close() {
        if (isRunning) {
            isRunning = false;
            flushBuffer();
            LoggingService.logInfo(MODULE_NAME, "Closing LogTailCallback: sessionId=" + sessionId);
        }
    }

    public boolean isRunning() {
        return isRunning;
    }

    /**
     * Handler interface for log tail callbacks.
     * Receives each line as raw bytes plus the Docker stream type it came from.
     */
    public interface LogTailHandler {
        void onLogLine(String sessionId, String microserviceUuid, byte[] lineBytes, StreamType streamType);
        void onComplete(String sessionId);
        void onError(String sessionId, Throwable throwable);
    }
}
d924e1f..87bb260 100755 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/Constants.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/Constants.java @@ -96,14 +96,8 @@ public String fullValue() { private static final String CONFIG_DIR = SystemUtils.IS_OS_WINDOWS ? WINDOWS_IOFOG_PATH : SNAP_COMMON + "/etc/iofog-agent/"; public static final String LOCAL_API_TOKEN_PATH = CONFIG_DIR + "local-api"; - public static final String DEFAULT_CONFIG_PATH = CONFIG_DIR + "config.xml"; - public static final String DEVELOPMENT_CONFIG_PATH = CONFIG_DIR + "config-development.xml"; - public static final String PRODUCTION_CONFIG_PATH = CONFIG_DIR + "config-production.xml"; - public static String BACKUP_CONFIG_PATH = CONFIG_DIR + "config-bck.xml"; - - public static final String CONFIG_SWITCHER_PATH = CONFIG_DIR + "config-switcher.xml"; - public static final String SWITCHER_ELEMENT = "switcher"; - public static final String SWITCHER_NODE = "current_config"; + public static final String CONFIG_YAML_PATH = CONFIG_DIR + "config.yaml"; + public static final String BACKUP_CONFIG_YAML_PATH = CONFIG_DIR + "config-bck.yaml"; public static final String OS_GROUP = "iofog-agent"; public static final String IOFOG_DOCKER_CONTAINER_NAME_PREFIX = "iofog_"; diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/LocalLogReader.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/LocalLogReader.java new file mode 100644 index 0000000..39e3e5d --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/LocalLogReader.java @@ -0,0 +1,464 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
/*
 * *******************************************************************************
 *  * Copyright (c) 2023 Datasance Teknoloji A.S.
 *  *
 *  * This program and the accompanying materials are made available under the
 *  * terms of the Eclipse Public License v. 2.0 which is available at
 *  * http://www.eclipse.org/legal/epl-2.0
 *  *
 *  * SPDX-License-Identifier: EPL-2.0
 *  *******************************************************************************
 *
 */
package org.eclipse.iofog.utils;

import org.eclipse.iofog.utils.configuration.Configuration;
import org.eclipse.iofog.utils.logging.LoggingService;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
import java.time.Instant;
import java.time.format.DateTimeParseException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Reads and streams the agent's own log lines from the local log files.
 *
 * A background daemon thread first emits the last N ("tail") lines of the
 * newest log file, optionally filtered by since/until ISO-8601 timestamps,
 * then (when follow=true) watches the log directory for appended lines and
 * streams them to the {@link LocalLogHandler} until stopped.
 */
public class LocalLogReader {
    private static final String MODULE_NAME = "LocalLogReader";
    // Pattern of rotated log files; currently unused in this class -
    // retained for rotated-file support. TODO(review): confirm rotation handling.
    private static final String LOG_FILE_PATTERN = "iofog-agent.%g.log";
    private static final String LATEST_LOG_FILE = "iofog-agent.0.log";
    // Default and maximum number of tail lines (requests are clamped to 1..10000).
    private static final int DEFAULT_TAIL_LINES = 100;
    private static final int MAX_TAIL_LINES = 10000;
    // Files below this size are read whole; larger files are read backwards.
    private static final long SMALL_FILE_THRESHOLD = 10L * 1024 * 1024;

    private final String sessionId;
    private final String iofogUuid;
    private final Map<String, Object> tailConfig;
    private final LocalLogHandler handler;
    private final AtomicBoolean isRunning = new AtomicBoolean(false);
    private Thread readerThread;
    private WatchService watchService;
    private Path logDirectory;
    private Path currentLogFile;

    /**
     * @param sessionId  identifier of the log-streaming session
     * @param iofogUuid  UUID of this agent (echoed back to the handler)
     * @param tailConfig optional map with keys "follow" (Boolean), "lines" (Number),
     *                   "since"/"until" (ISO-8601 instant strings); may be null
     * @param handler    receiver of log lines and completion/error events
     */
    public LocalLogReader(String sessionId, String iofogUuid, Map<String, Object> tailConfig, LocalLogHandler handler) {
        this.sessionId = sessionId;
        this.iofogUuid = iofogUuid;
        this.tailConfig = tailConfig;
        this.handler = handler;
        this.logDirectory = Paths.get(Configuration.getLogDiskDirectory());
        this.currentLogFile = logDirectory.resolve(LATEST_LOG_FILE);
        LoggingService.logDebug(MODULE_NAME, "Created LocalLogReader: sessionId=" + sessionId
                + ", logFile=" + currentLogFile);
    }

    /**
     * Start reading logs on a background daemon thread. Idempotent.
     */
    public void start() {
        if (isRunning.compareAndSet(false, true)) {
            readerThread = new Thread(this::readLogs, "LocalLogReader-" + sessionId);
            readerThread.setDaemon(true);
            readerThread.start();
            LoggingService.logInfo(MODULE_NAME, "Started LocalLogReader: sessionId=" + sessionId);
        }
    }

    /**
     * Stop reading logs. Interrupts the reader thread and closes the watch service.
     */
    public void stop() {
        if (isRunning.compareAndSet(true, false)) {
            if (readerThread != null) {
                readerThread.interrupt();
            }
            if (watchService != null) {
                try {
                    watchService.close();
                } catch (IOException e) {
                    LoggingService.logError(MODULE_NAME, "Error closing watch service", e);
                }
            }
            LoggingService.logInfo(MODULE_NAME, "Stopped LocalLogReader: sessionId=" + sessionId);
        }
    }

    /**
     * Stop reading and release resources. Waits briefly for the reader thread.
     * (The previous explicit System.gc() call was removed - forcing GC is an
     * anti-pattern; dropping references is sufficient.)
     */
    public void cleanup() {
        stop(); // Stop reading first

        // Wait for the reader thread to finish, bounded to 2 seconds.
        if (readerThread != null && readerThread.isAlive()) {
            try {
                readerThread.join(2000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                LoggingService.logWarning(MODULE_NAME, "Interrupted while waiting for reader thread: " + sessionId);
            }
        }

        readerThread = null;
        watchService = null;
        currentLogFile = null;
        logDirectory = null;

        LoggingService.logDebug(MODULE_NAME, "Cleaned up LocalLogReader resources: sessionId=" + sessionId);
    }

    /**
     * Main loop of the reader thread: emit the initial tail, then optionally follow.
     */
    private void readLogs() {
        try {
            // Parse tail config with defaults; any key may be absent.
            boolean follow = tailConfig != null && tailConfig.containsKey("follow")
                    ? (Boolean) tailConfig.get("follow") : true;
            int tailLines = tailConfig != null && tailConfig.containsKey("lines")
                    ? ((Number) tailConfig.get("lines")).intValue() : DEFAULT_TAIL_LINES;
            String since = tailConfig != null && tailConfig.containsKey("since")
                    ? (String) tailConfig.get("since") : null;
            String until = tailConfig != null && tailConfig.containsKey("until")
                    ? (String) tailConfig.get("until") : null;

            // Clamp tail lines to 1..MAX_TAIL_LINES.
            if (tailLines < 1) tailLines = DEFAULT_TAIL_LINES;
            if (tailLines > MAX_TAIL_LINES) tailLines = MAX_TAIL_LINES;

            LoggingService.logDebug(MODULE_NAME, "Reading logs: follow=" + follow
                    + ", lines=" + tailLines
                    + ", since=" + since
                    + ", until=" + until);

            if (!Files.exists(currentLogFile)) {
                LoggingService.logWarning(MODULE_NAME, "Log file does not exist: " + currentLogFile);
                if (handler != null) {
                    handler.onError(sessionId, new FileNotFoundException("Log file not found: " + currentLogFile));
                }
                return;
            }

            // Emit the initial tail lines.
            List<String> lines = readTailLines(currentLogFile, tailLines, since, until);
            for (String line : lines) {
                if (!isRunning.get()) break;
                if (handler != null) {
                    handler.onLogLine(sessionId, iofogUuid, line);
                }
            }

            // If follow is requested, keep watching for appended lines.
            if (follow && isRunning.get()) {
                watchForNewLines(currentLogFile, since, until);
            }

            if (handler != null) {
                handler.onComplete(sessionId);
            }
        } catch (Exception e) {
            LoggingService.logError(MODULE_NAME, "Error reading logs: sessionId=" + sessionId, e);
            if (handler != null) {
                handler.onError(sessionId, e);
            }
        }
    }

    /**
     * Return the last {@code tailLines} lines of {@code logFile}, filtered by
     * the optional since/until timestamps.
     */
    private List<String> readTailLines(Path logFile, int tailLines, String since, String until) throws IOException {
        long fileSize = Files.size(logFile);

        // Small files: read everything, filter, then slice the tail.
        if (fileSize < SMALL_FILE_THRESHOLD) {
            try (Stream<String> lineStream = Files.lines(logFile, StandardCharsets.UTF_8)) {
                List<String> allLines = lineStream.collect(Collectors.toList());
                List<String> filteredLines = (since != null || until != null)
                        ? filterByTimestamp(allLines, since, until)
                        : allLines;
                int startIndex = Math.max(0, filteredLines.size() - tailLines);
                return new ArrayList<>(filteredLines.subList(startIndex, filteredLines.size()));
            }
        }

        // Large files: read backwards from the end without loading the whole file.
        return readTailLinesFromEnd(logFile, tailLines, since, until);
    }

    /**
     * Read the last {@code tailLines} lines of a large file by scanning backwards.
     *
     * Fixes over the previous implementation:
     * - position bookkeeping started at fileLength-1, which skipped the file's
     *   last byte and produced a negative seek (IOException) on the final chunk;
     * - once {@code tailLines} lines were collected, older lines kept evicting
     *   NEWER ones from the deque - a tail must keep the newest lines;
     * - bytes were cast char-per-byte (Latin-1), corrupting UTF-8 content; lines
     *   are now decoded as UTF-8 only once complete.
     */
    private List<String> readTailLinesFromEnd(Path logFile, int tailLines, String since, String until) throws IOException {
        Deque<String> lastLines = new ArrayDeque<>(tailLines);

        try (RandomAccessFile raf = new RandomAccessFile(logFile.toFile(), "r")) {
            long remaining = raf.length();
            if (remaining == 0) {
                return new ArrayList<>();
            }

            // Bytes of the current line, accumulated in reverse order while
            // scanning backwards; reversed and decoded when the line is complete.
            ByteArrayOutputStream reversedLine = new ByteArrayOutputStream();
            byte[] buffer = new byte[8192]; // 8KB chunks

            while (remaining > 0 && lastLines.size() < tailLines) {
                int chunk = (int) Math.min(buffer.length, remaining);
                remaining -= chunk;
                raf.seek(remaining);
                raf.readFully(buffer, 0, chunk);

                // Walk the chunk backwards.
                for (int i = chunk - 1; i >= 0; i--) {
                    byte b = buffer[i];
                    if (b == (byte) '\n') {
                        if (reversedLine.size() > 0) {
                            String line = decodeReversed(reversedLine);
                            reversedLine.reset();
                            if (shouldIncludeLine(line, since, until)) {
                                lastLines.addFirst(line);
                                if (lastLines.size() >= tailLines) {
                                    // We already have the newest tailLines lines.
                                    return new ArrayList<>(lastLines);
                                }
                            }
                        }
                    } else if (b != (byte) '\r') {
                        reversedLine.write(b);
                    }
                }
            }

            // First line of the file (no preceding newline).
            if (reversedLine.size() > 0 && lastLines.size() < tailLines) {
                String line = decodeReversed(reversedLine);
                if (shouldIncludeLine(line, since, until)) {
                    lastLines.addFirst(line);
                }
            }
        }

        return new ArrayList<>(lastLines);
    }

    /**
     * Reverse the accumulated bytes and decode them as UTF-8.
     */
    private static String decodeReversed(ByteArrayOutputStream reversedLine) {
        byte[] bytes = reversedLine.toByteArray();
        for (int i = 0, j = bytes.length - 1; i < j; i++, j--) {
            byte tmp = bytes[i];
            bytes[i] = bytes[j];
            bytes[j] = tmp;
        }
        return new String(bytes, StandardCharsets.UTF_8);
    }

    /**
     * Whether a line passes the optional since/until timestamp filters.
     * Lines without a parseable leading timestamp are always included.
     */
    private boolean shouldIncludeLine(String line, String since, String until) {
        if (since == null && until == null) {
            return true;
        }

        Instant lineTime = extractTimestampFromLine(line);
        if (lineTime == null) {
            return true; // Include if we can't parse a timestamp
        }

        try {
            if (since != null && !since.isEmpty()) {
                Instant sinceInstant = Instant.parse(since);
                if (lineTime.isBefore(sinceInstant)) {
                    return false;
                }
            }
            if (until != null && !until.isEmpty()) {
                Instant untilInstant = Instant.parse(until);
                if (lineTime.isAfter(untilInstant)) {
                    return false;
                }
            }
        } catch (Exception e) {
            // If filter timestamp parsing fails, include the line.
            return true;
        }
        return true;
    }

    /**
     * Filter a list of lines by the optional since/until timestamps.
     * If either filter timestamp is malformed, all lines are returned unfiltered.
     */
    private List<String> filterByTimestamp(List<String> lines, String since, String until) {
        Instant sinceInstant = null;
        Instant untilInstant = null;

        try {
            if (since != null && !since.isEmpty()) {
                sinceInstant = Instant.parse(since);
            }
            if (until != null && !until.isEmpty()) {
                untilInstant = Instant.parse(until);
            }
        } catch (DateTimeParseException e) {
            LoggingService.logWarning(MODULE_NAME, "Invalid timestamp format in filter: " + e.getMessage());
            return lines;
        }

        final Instant sinceFinal = sinceInstant;
        final Instant untilFinal = untilInstant;

        return lines.stream()
                .filter(line -> {
                    Instant lineTime = extractTimestampFromLine(line);
                    if (lineTime == null) {
                        return true; // can't tell - include
                    }
                    if (sinceFinal != null && lineTime.isBefore(sinceFinal)) {
                        return false;
                    }
                    return untilFinal == null || !lineTime.isAfter(untilFinal);
                })
                .collect(Collectors.toList());
    }

    /**
     * Best-effort extraction of a leading ISO-8601 instant from a log line.
     * Parses the first whitespace-delimited token (the previous fixed-30-char
     * substring included trailing text, so Instant.parse nearly always failed
     * and timestamp filtering silently never applied).
     * Returns null when no timestamp can be parsed.
     * NOTE(review): assumes log lines start with an ISO-8601 instant - confirm
     * against the agent's log formatter.
     */
    private Instant extractTimestampFromLine(String line) {
        int end = line.indexOf(' ');
        String token = end > 0 ? line.substring(0, end) : line;
        try {
            return Instant.parse(token);
        } catch (Exception e) {
            return null;
        }
    }

    /**
     * Follow the log file: watch the directory for modifications and stream
     * appended lines until stopped, interrupted, or the until timestamp passes.
     */
    private void watchForNewLines(Path logFile, String since, String until) throws IOException {
        watchService = FileSystems.getDefault().newWatchService();
        logDirectory.register(watchService, StandardWatchEventKinds.ENTRY_MODIFY);

        long lastPosition = Files.size(logFile);
        Instant untilInstant = null;
        if (until != null && !until.isEmpty()) {
            try {
                untilInstant = Instant.parse(until);
            } catch (DateTimeParseException e) {
                LoggingService.logWarning(MODULE_NAME, "Invalid until timestamp format: " + e.getMessage());
            }
        }

        final Instant untilFinal = untilInstant;

        while (isRunning.get()) {
            try {
                // Wait briefly for a file-system event.
                WatchKey key = watchService.poll(1, TimeUnit.SECONDS);
                if (key != null) {
                    for (WatchEvent<?> event : key.pollEvents()) {
                        if (event.kind() == StandardWatchEventKinds.ENTRY_MODIFY) {
                            Path changedFile = (Path) event.context();
                            if (changedFile.toString().equals(logFile.getFileName().toString())) {
                                readNewLines(logFile, lastPosition, untilFinal);
                                // NOTE(review): lines appended between readNewLines and
                                // this size() call are skipped until the next event.
                                lastPosition = Files.size(logFile);
                            }
                        }
                    }
                    key.reset();
                }

                // Size-based fallback in case the watch service misses events.
                long currentSize = Files.size(logFile);
                if (currentSize > lastPosition) {
                    readNewLines(logFile, lastPosition, untilFinal);
                    lastPosition = currentSize;
                }

                // Stop once wall-clock time passes the until timestamp.
                if (untilFinal != null && Instant.now().isAfter(untilFinal)) {
                    LoggingService.logInfo(MODULE_NAME, "Reached until timestamp, stopping log reading");
                    break;
                }

                Thread.sleep(100); // Avoid busy-waiting
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            } catch (Exception e) {
                LoggingService.logError(MODULE_NAME, "Error watching log file", e);
                try {
                    Thread.sleep(1000); // Back off before retrying
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
    }

    /**
     * Read and emit lines appended after {@code startPosition}.
     */
    private void readNewLines(Path logFile, long startPosition, Instant untilInstant) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(logFile.toFile(), "r")) {
            raf.seek(startPosition);
            String line;
            while ((line = raf.readLine()) != null && isRunning.get()) {
                if (line.isEmpty()) continue;

                // Stop once lines pass the until timestamp.
                if (untilInstant != null) {
                    Instant lineTime = extractTimestampFromLine(line);
                    if (lineTime != null && lineTime.isAfter(untilInstant)) {
                        break;
                    }
                }

                // RandomAccessFile.readLine maps each byte to a char (Latin-1-like);
                // round-trip through ISO-8859-1 bytes to recover the original UTF-8.
                String utf8Line = new String(line.getBytes(StandardCharsets.ISO_8859_1), StandardCharsets.UTF_8);
                if (handler != null) {
                    handler.onLogLine(sessionId, iofogUuid, utf8Line);
                }
            }
        }
    }

    public boolean isRunning() {
        return isRunning.get();
    }

    /**
     * Handler interface for local log reading.
     */
    public interface LocalLogHandler {
        void onLogLine(String sessionId, String iofogUuid, String line);
        void onComplete(String sessionId);
        void onError(String sessionId, Throwable throwable);
    }
}
communication + */ +public class LogMessage { + private byte type; // 6: LOG_LINE, 7: LOG_START, 8: LOG_STOP, 9: LOG_ERROR + private byte[] data; + private String sessionId; + private String microserviceUuid; // For microservice logs + private String iofogUuid; // For fog logs + private long timestamp; + + // Default constructor required by MessagePack + public LogMessage() {} + + public LogMessage(byte type, byte[] data, String sessionId, String microserviceUuid, String iofogUuid) { + this.type = type; + this.data = data; + this.sessionId = sessionId; + this.microserviceUuid = microserviceUuid; + this.iofogUuid = iofogUuid; + this.timestamp = System.currentTimeMillis(); + } + + // Getters and setters + public byte getType() { return type; } + public void setType(byte type) { this.type = type; } + + public byte[] getData() { return data; } + public void setData(byte[] data) { this.data = data; } + + public String getSessionId() { return sessionId; } + public void setSessionId(String sessionId) { this.sessionId = sessionId; } + + public String getMicroserviceUuid() { return microserviceUuid; } + public void setMicroserviceUuid(String microserviceUuid) { this.microserviceUuid = microserviceUuid; } + + public String getIofogUuid() { return iofogUuid; } + public void setIofogUuid(String iofogUuid) { this.iofogUuid = iofogUuid; } + + public long getTimestamp() { return timestamp; } + public void setTimestamp(long timestamp) { this.timestamp = timestamp; } +} + diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/LogSessionWebSocketHandler.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/LogSessionWebSocketHandler.java new file mode 100644 index 0000000..dd85a41 --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/LogSessionWebSocketHandler.java @@ -0,0 +1,659 @@ +package org.eclipse.iofog.utils; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.netty.bootstrap.Bootstrap; +import 
io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.*; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.websocketx.*; +import io.netty.handler.ssl.SslHandler; +import org.eclipse.iofog.utils.logging.LoggingService; +import org.eclipse.iofog.utils.configuration.Configuration; +import org.eclipse.iofog.utils.JwtManager; +import org.eclipse.iofog.utils.trustmanager.TrustManagers; +import org.msgpack.core.MessageBufferPacker; +import org.msgpack.core.MessagePack; +import org.msgpack.core.MessageUnpacker; +import org.eclipse.iofog.exception.AgentSystemException; + +import java.net.URI; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.io.FileInputStream; +import java.io.IOException; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.security.cert.CertificateException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import javax.net.ssl.SSLContext; +import java.security.SecureRandom; +import javax.net.ssl.SSLEngine; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicInteger; + +public class LogSessionWebSocketHandler { + private static final String MODULE_NAME = "Log Session WebSocket Handler"; + private static final int MAX_RECONNECT_ATTEMPTS = 5; + private static final int RECONNECT_DELAY_MS = 
5000; + private static final int PING_INTERVAL_MS = 30000; + private static final int HANDSHAKE_TIMEOUT_MS = 10000; + private static final int MAX_FRAME_SIZE = 65536; + + // Buffer configuration + private static final int MAX_BUFFER_SIZE = 1024 * 1024; // 1MB + private static final int MAX_BUFFERED_FRAMES = 1000; + + // Message type constants + private static final byte TYPE_LOG_LINE = 6; + private static final byte TYPE_LOG_START = 7; + private static final byte TYPE_LOG_STOP = 8; + private static final byte TYPE_LOG_ERROR = 9; + + // Add static map to track existing handlers by sessionId + private static final Map activeHandlers = new ConcurrentHashMap<>(); + + private final String controllerWsUrl; + private final String sessionId; + private final String microserviceUuid; // null for fog logs + private final String iofogUuid; // null for microservice logs + private final AtomicBoolean isConnected; + private final AtomicBoolean isActive; + private final ScheduledExecutorService scheduler; + private ScheduledFuture pingFuture; + private int reconnectAttempts; + private final ObjectMapper objectMapper; + private final MessageUnpacker messageUnpacker; + private final Queue outputBuffer = new ConcurrentLinkedQueue<>(); + private final AtomicLong totalBufferedSize = new AtomicLong(0); + private final AtomicInteger bufferedFrames = new AtomicInteger(0); + private Map tailConfig; // Set when LOG_START is received + private org.eclipse.iofog.field_agent.LogSessionManager logSessionManager; // Reference to start tailing when ready + + private Channel channel; + private EventLoopGroup group; + private WebSocketClientHandshaker handshaker; + private SSLContext sslContext; + + private enum ConnectionState { + DISCONNECTED, + CONNECTING, + CONNECTED, + PENDING, // Connected but waiting for LOG_START + ACTIVE // Connected and received LOG_START, ready to stream + } + + private ConnectionState currentState = ConnectionState.DISCONNECTED; + + private boolean 
transitionState(ConnectionState from, ConnectionState to) { + synchronized (this) { + if (currentState == from) { + currentState = to; + LoggingService.logInfo(MODULE_NAME, "Connection state transition: " + from + " -> " + to); + return true; + } + return false; + } + } + + public static LogSessionWebSocketHandler getInstance(String sessionId, String microserviceUuid, String iofogUuid) { + return activeHandlers.computeIfAbsent(sessionId, + sid -> new LogSessionWebSocketHandler(sid, microserviceUuid, iofogUuid)); + } + + private LogSessionWebSocketHandler(String sessionId, String microserviceUuid, String iofogUuid) { + try { + // Build WebSocket URL based on log type + if (microserviceUuid != null && !microserviceUuid.isEmpty()) { + this.controllerWsUrl = Configuration.getControllerWSUrl() + "agent/logs/microservice/" + microserviceUuid + "/" + sessionId; + } else if (iofogUuid != null && !iofogUuid.isEmpty()) { + this.controllerWsUrl = Configuration.getControllerWSUrl() + "agent/logs/iofog/" + iofogUuid + "/" + sessionId; + } else { + throw new AgentSystemException("Either microserviceUuid or iofogUuid must be provided", null); + } + + this.sessionId = sessionId; + this.microserviceUuid = microserviceUuid; + this.iofogUuid = iofogUuid; + this.isConnected = new AtomicBoolean(false); + this.isActive = new AtomicBoolean(false); + this.scheduler = Executors.newSingleThreadScheduledExecutor(); + this.objectMapper = new ObjectMapper(); + this.messageUnpacker = MessagePack.newDefaultUnpacker(new byte[0]); + this.reconnectAttempts = 0; + initializeSslContext(); + } catch (AgentSystemException e) { + LoggingService.logError(MODULE_NAME, "Failed to initialize WebSocket handler", e); + throw new RuntimeException("Failed to initialize WebSocket handler", e); + } + } + + private void initializeSslContext() { + try { + Certificate controllerCert = loadControllerCert(); + if (controllerCert != null) { + sslContext = SSLContext.getInstance("TLS"); + sslContext.init(null, 
TrustManagers.createWebSocketTrustManager(controllerCert), new SecureRandom()); + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Failed to initialize SSL context", e); + sslContext = null; + } + } + + private Certificate loadControllerCert() { + try { + if (Configuration.getControllerCert() != null) { + try (FileInputStream fileInputStream = new FileInputStream(Configuration.getControllerCert())) { + CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509"); + return certificateFactory.generateCertificate(fileInputStream); + } + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Failed to load controller certificate", e); + } + return null; + } + + public void update() { + boolean secure = true; + if (controllerWsUrl.toLowerCase().startsWith("wss")) { + try (FileInputStream fileInputStream = new FileInputStream(Configuration.getControllerCert())) { + Certificate controllerCert = getCert(fileInputStream); + if (controllerCert != null) { + initializeSslContext(); + } else { + secure = false; + } + } catch (IOException e) { + LoggingService.logError(MODULE_NAME, "Failed to load controller certificate", e); + secure = false; + } + } else { + secure = false; + } + + if (!secure) { + LoggingService.logWarning(MODULE_NAME, "Using insecure WebSocket connection"); + } + } + + private Certificate getCert(InputStream is) { + try { + CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509"); + return certificateFactory.generateCertificate(is); + } catch (CertificateException e) { + LoggingService.logError(MODULE_NAME, "Failed to generate certificate", e); + return null; + } + } + + public void connect() { + if (!transitionState(ConnectionState.DISCONNECTED, ConnectionState.CONNECTING)) { + LoggingService.logWarning(MODULE_NAME, "Connection already in progress or established"); + return; + } + + try { + URI uri = new URI(controllerWsUrl); + final String host = uri.getHost(); + final int port = 
uri.getPort() > 0 ? uri.getPort() : (uri.getScheme().equals("wss") ? 443 : 80); + + String jwtToken = JwtManager.generateJwt(); + + Bootstrap bootstrap = new Bootstrap(); + group = new NioEventLoopGroup(); + bootstrap.group(group) + .channel(NioSocketChannel.class) + .handler(new ChannelInitializer() { + @Override + protected void initChannel(SocketChannel ch) { + ChannelPipeline p = ch.pipeline(); + + // SSL/TLS + if (controllerWsUrl.startsWith("wss") && sslContext != null) { + SSLEngine engine = sslContext.createSSLEngine(host, port); + engine.setUseClientMode(true); + p.addLast("ssl-handler", new SslHandler(engine)); + } + + // HTTP + p.addLast("http-codec", new HttpClientCodec()); + p.addLast("http-aggregator", new HttpObjectAggregator(65536)); + + // WebSocket configuration + WebSocketClientProtocolConfig config = WebSocketClientProtocolConfig.newBuilder() + .webSocketUri(uri) + .version(WebSocketVersion.V13) + .allowExtensions(false) + .customHeaders(new DefaultHttpHeaders() + .add("Authorization", "Bearer " + jwtToken)) + .maxFramePayloadLength(MAX_FRAME_SIZE) + .handleCloseFrames(true) + .dropPongFrames(true) + .handshakeTimeoutMillis(HANDSHAKE_TIMEOUT_MS) + .build(); + + // Add WebSocket protocol handler + p.addLast("ws-protocol-handler", new WebSocketClientProtocolHandler(config)); + + // Custom frame handler + p.addLast("ws-frame-handler", new WebSocketFrameHandler()); + } + }); + + // Connect + LoggingService.logInfo(MODULE_NAME, "Connecting to WebSocket server: " + uri); + channel = bootstrap.connect(host, port).sync().channel(); + LoggingService.logInfo(MODULE_NAME, "Channel connected successfully"); + + // Update connection state + isConnected.set(true); + reconnectAttempts = 0; + + // Start ping scheduler + startPingScheduler(); + + LoggingService.logInfo(MODULE_NAME, "WebSocket connection established successfully"); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Failed to establish WebSocket connection", e); + 
handleConnectionFailure(); + } + } + + private class WebSocketFrameHandler extends SimpleChannelInboundHandler { + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { + if (evt instanceof WebSocketClientProtocolHandler.ClientHandshakeStateEvent) { + WebSocketClientProtocolHandler.ClientHandshakeStateEvent handshakeEvent = + (WebSocketClientProtocolHandler.ClientHandshakeStateEvent) evt; + + if (handshakeEvent == WebSocketClientProtocolHandler.ClientHandshakeStateEvent.HANDSHAKE_COMPLETE) { + LoggingService.logInfo(MODULE_NAME, "WebSocket handshake completed successfully"); + if (transitionState(ConnectionState.CONNECTING, ConnectionState.PENDING)) { + LoggingService.logInfo(MODULE_NAME, "Connection is now pending LOG_START message"); + } + // NO initial message - wait for LOG_START from controller + } else if (evt == WebSocketClientProtocolHandler.ClientHandshakeStateEvent.HANDSHAKE_TIMEOUT) { + LoggingService.logWarning(MODULE_NAME, "WebSocket handshake timed out"); + handleConnectionFailure(); + } + } + super.userEventTriggered(ctx, evt); + } + + @Override + protected void channelRead0(ChannelHandlerContext ctx, WebSocketFrame frame) { + if (frame instanceof BinaryWebSocketFrame) { + ByteBuf content = frame.content(); + byte[] msgBytes = new byte[content.readableBytes()]; + content.readBytes(msgBytes); + try { + MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(msgBytes); + LogMessage message = new LogMessage(); + + LoggingService.logDebug(MODULE_NAME, "Received binary frame: " + + "length=" + msgBytes.length); + + // Read map header + int mapSize = unpacker.unpackMapHeader(); + + // Read key-value pairs + for (int i = 0; i < mapSize; i++) { + String key = unpacker.unpackString(); + switch (key) { + case "type": + byte type = unpacker.unpackByte(); + message.setType(type); + break; + case "data": + int dataLength = unpacker.unpackBinaryHeader(); + message.setData(unpacker.readPayload(dataLength)); + break; 
+ case "sessionId": + message.setSessionId(unpacker.unpackString()); + break; + case "microserviceUuid": + message.setMicroserviceUuid(unpacker.unpackString()); + break; + case "iofogUuid": + message.setIofogUuid(unpacker.unpackString()); + break; + case "timestamp": + message.setTimestamp(unpacker.unpackLong()); + break; + default: + LoggingService.logWarning(MODULE_NAME, "Unknown message key: " + key); + break; + } + } + + handleMessage(message); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Failed to unpack message: " + + "error=" + e.getMessage() + + ", frameLength=" + msgBytes.length, e); + } + } + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) { + LoggingService.logInfo(MODULE_NAME, "Channel became inactive"); + handleClose(); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + LoggingService.logError(MODULE_NAME, "WebSocket error", cause); + handleClose(); + } + } + + private void handleMessage(LogMessage message) { + if (message == null) return; + LoggingService.logDebug(MODULE_NAME, "Handling message: type=" + message.getType() + + ", sessionId=" + message.getSessionId()); + + switch (message.getType()) { + case TYPE_LOG_START: + handleLogStart(message); + break; + case TYPE_LOG_STOP: + LoggingService.logInfo(MODULE_NAME, "Received LOG_STOP message for session: " + message.getSessionId()); + handleClose(); + break; + case TYPE_LOG_ERROR: + handleLogError(message); + break; + default: + LoggingService.logWarning(MODULE_NAME, "Unknown message type: " + message.getType()); + break; + } + } + + private void handleLogStart(LogMessage message) { + try { + // Parse tailConfig from data + String dataStr = new String(message.getData(), StandardCharsets.UTF_8); + @SuppressWarnings("unchecked") + Map config = objectMapper.readValue(dataStr, Map.class); + @SuppressWarnings("unchecked") + Map tailConfigMap = (Map) config.get("tailConfig"); + this.tailConfig = tailConfigMap; + + 
LoggingService.logInfo(MODULE_NAME, "Received LOG_START message with tailConfig: sessionId=" + sessionId); + if (transitionState(ConnectionState.PENDING, ConnectionState.ACTIVE)) { + isActive.set(true); + + // Trigger log streaming start in LogSessionManager with tailConfig from LOG_START + if (logSessionManager != null) { + LoggingService.logInfo(MODULE_NAME, "Triggering log streaming start on WebSocket activation: sessionId=" + sessionId); + logSessionManager.startLogStreamingOnActivation(sessionId, tailConfigMap); + } else { + LoggingService.logWarning(MODULE_NAME, "LogSessionManager not set, cannot start log streaming: sessionId=" + sessionId); + } + + // Notify handler that we're ready to stream + flushBufferedOutput(); + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error handling LOG_START message", e); + } + } + + private void handleLogError(LogMessage message) { + try { + String errorMsg = new String(message.getData(), StandardCharsets.UTF_8); + LoggingService.logError(MODULE_NAME, "Received LOG_ERROR: " + errorMsg, null); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error handling LOG_ERROR message", e); + } + } + + public void bufferOutput(byte[] payload) { + if (bufferedFrames.get() >= MAX_BUFFERED_FRAMES) { + LoggingService.logWarning(MODULE_NAME, "Maximum frame count reached, dropping frame"); + return; + } + + long currentSize = totalBufferedSize.get(); + if (currentSize + payload.length > MAX_BUFFER_SIZE) { + LoggingService.logWarning(MODULE_NAME, "Buffer full, dropping frame"); + return; + } + + outputBuffer.add(payload); + totalBufferedSize.addAndGet(payload.length); + bufferedFrames.incrementAndGet(); + + LoggingService.logDebug(MODULE_NAME, + "Buffered frame: size=" + payload.length + + ", totalSize=" + totalBufferedSize.get() + + ", frameCount=" + bufferedFrames.get()); + } + + public void flushBufferedOutput() { + LoggingService.logInfo(MODULE_NAME, + "Flushing buffered output: frames=" + 
bufferedFrames.get() + + ", totalSize=" + totalBufferedSize.get()); + + while (!outputBuffer.isEmpty()) { + byte[] output = outputBuffer.poll(); + if (output != null) { + totalBufferedSize.addAndGet(-output.length); + bufferedFrames.decrementAndGet(); + sendLogLine(output); + } + } + } + + public void sendLogLine(byte[] logLineBytes) { + if (!isConnected.get()) { + LoggingService.logWarning(MODULE_NAME, "Cannot send log line - not connected"); + return; + } + + // If not active, buffer the output + if (!isActive.get()) { + LoggingService.logDebug(MODULE_NAME, "Buffering output while connection is not active: " + + "length=" + logLineBytes.length); + bufferOutput(logLineBytes); + return; + } + + try { + MessageBufferPacker packer = MessagePack.newDefaultBufferPacker(); + packer.packMapHeader(6); // 6 key-value pairs + + // Type + packer.packString("type"); + packer.packByte(TYPE_LOG_LINE); + + // Data + packer.packString("data"); + packer.packBinaryHeader(logLineBytes.length); + packer.writePayload(logLineBytes); + + // Session ID + packer.packString("sessionId"); + packer.packString(sessionId); + + // Microservice UUID or iofog UUID + if (microserviceUuid != null) { + packer.packString("microserviceUuid"); + packer.packString(microserviceUuid); + packer.packString("iofogUuid"); + packer.packNil(); // null for microservice logs + } else { + packer.packString("microserviceUuid"); + packer.packNil(); // null for fog logs + packer.packString("iofogUuid"); + packer.packString(iofogUuid); + } + + // Timestamp + packer.packString("timestamp"); + packer.packLong(System.currentTimeMillis()); + + byte[] msgBytes = packer.toByteArray(); + ByteBuf content = Unpooled.wrappedBuffer(msgBytes); + BinaryWebSocketFrame frame = new BinaryWebSocketFrame(true, 0, content); + + channel.writeAndFlush(frame).addListener(future -> { + if (future.isSuccess()) { + LoggingService.logDebug(MODULE_NAME, "Sent log line: " + + "length=" + logLineBytes.length + + ", sessionId=" + sessionId); + } 
else { + LoggingService.logError(MODULE_NAME, "Failed to send log line", future.cause()); + } + }); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error sending log line", e); + } + } + + public void onActivation() { + LoggingService.logInfo(MODULE_NAME, "WebSocket activated"); + isActive.set(true); + flushBufferedOutput(); + } + + public void handleClose() { + if (!isConnected.get()) { + LoggingService.logDebug(MODULE_NAME, "Already disconnected for session: " + sessionId); + return; + } + + LoggingService.logInfo(MODULE_NAME, "Handling close for session: " + sessionId + + ", connectionState=" + currentState + + ", reconnectAttempts=" + reconnectAttempts); + + isConnected.set(false); + cleanup(); + + LoggingService.logInfo(MODULE_NAME, "Close handling completed for session: " + sessionId); + } + + private void handleConnectionFailure() { + if (reconnectAttempts < MAX_RECONNECT_ATTEMPTS) { + reconnectAttempts++; + LoggingService.logInfo(MODULE_NAME, "Scheduling reconnection attempt " + reconnectAttempts); + scheduler.schedule(this::connect, RECONNECT_DELAY_MS, TimeUnit.MILLISECONDS); + } else { + if (reconnectAttempts >= MAX_RECONNECT_ATTEMPTS) { + LoggingService.logError(MODULE_NAME, "Max reconnection attempts reached", null); + return; + } + cleanup(); + } + } + + private void startPingScheduler() { + if (pingFuture != null) { + pingFuture.cancel(true); + } + + pingFuture = scheduler.scheduleAtFixedRate(() -> { + if (isConnected.get() && channel != null && channel.isActive()) { + try { + channel.writeAndFlush(new PingWebSocketFrame()); + LoggingService.logDebug(MODULE_NAME, "Sent ping frame"); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error sending ping frame", e); + } + } + }, PING_INTERVAL_MS, PING_INTERVAL_MS, TimeUnit.MILLISECONDS); + } + + private void stopPingScheduler() { + if (pingFuture != null) { + pingFuture.cancel(true); + pingFuture = null; + } + } + + public void disconnect() { + 
LoggingService.logInfo(MODULE_NAME, "Disconnecting WebSocket for session: " + sessionId); + cleanup(); + activeHandlers.remove(sessionId); + } + + private void cleanup() { + LoggingService.logDebug(MODULE_NAME, "Starting cleanup for session: " + sessionId); + + try { + // Stop ping scheduler + stopPingScheduler(); + LoggingService.logDebug(MODULE_NAME, "Stopped ping scheduler"); + + // Close channel if it exists + if (channel != null && channel.isOpen()) { + LoggingService.logDebug(MODULE_NAME, "Closing channel"); + channel.close(); + LoggingService.logDebug(MODULE_NAME, "Channel closed successfully"); + } + + // Shutdown event loop group + if (group != null && !group.isShutdown()) { + LoggingService.logDebug(MODULE_NAME, "Shutting down event loop group"); + group.shutdownGracefully(); + LoggingService.logDebug(MODULE_NAME, "Event loop group shutdown completed"); + } + + // Clear buffers + outputBuffer.clear(); + totalBufferedSize.set(0); + bufferedFrames.set(0); + LoggingService.logDebug(MODULE_NAME, "Cleared output buffers"); + + // Reset state + isActive.set(false); + currentState = ConnectionState.DISCONNECTED; + reconnectAttempts = 0; + LoggingService.logDebug(MODULE_NAME, "Reset connection state"); + + // Remove from active handlers + activeHandlers.remove(sessionId); + LoggingService.logDebug(MODULE_NAME, "Removed from active handlers"); + + LoggingService.logInfo(MODULE_NAME, "Cleanup completed successfully for session: " + sessionId); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error during cleanup for session: " + sessionId, e); + } + } + + public boolean isConnected() { + return isConnected.get() && channel != null && channel.isActive(); + } + + public boolean isActive() { + return isActive.get(); + } + + public Map getTailConfig() { + return tailConfig; + } + + /** + * Set LogSessionManager reference so handler can trigger tailing when WebSocket is activated + */ + public void 
setLogSessionManager(org.eclipse.iofog.field_agent.LogSessionManager logSessionManager) { + this.logSessionManager = logSessionManager; + } +} + diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/Orchestrator.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/Orchestrator.java index 4970a3d..656c787 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/Orchestrator.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/Orchestrator.java @@ -66,6 +66,11 @@ import java.security.cert.CertificateFactory; import java.util.Map; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.eclipse.iofog.utils.logging.LoggingService.*; @@ -75,7 +80,14 @@ * @author saeid */ public class Orchestrator { - private static final int CONNECTION_TIMEOUT = 10000; + private static final int CONNECTION_TIMEOUT = 10000; // 10 seconds + // Socket timeout (read timeout) set to 5 minutes - less than JWT expiry of 10 minutes + // This prevents requests from hanging long enough for JWT tokens to expire + private static final int SOCKET_TIMEOUT = 5 * 60 * 1000; // 5 minutes + // Request-level timeout covers DNS resolution, connection, and read phases + private static final int REQUEST_TIMEOUT_SECONDS = 5 * 60; // 5 minutes (same as socket timeout) + private static final ExecutorService executorService = Executors.newCachedThreadPool(); + private String controllerUrl; private String iofogUuid; // private String iofogAccessToken; @@ -120,7 +132,7 @@ public JsonObject provision(String key) throws AgentSystemException { JsonObject result; JsonObject json = Json.createObjectBuilder() .add("key", key) - .add("type", Configuration.getFogType().getCode()) + .add("type", Configuration.getArch().getCode()) .build(); result = request("provision", 
RequestType.POST, null, json); @@ -137,6 +149,8 @@ private RequestConfig getRequestConfig() throws Exception { return RequestConfig.copy(RequestConfig.DEFAULT) .setLocalAddress(IOFogNetworkInterfaceManager.getInstance().getInetAddress()) .setConnectTimeout(CONNECTION_TIMEOUT) + .setSocketTimeout(SOCKET_TIMEOUT) // Read timeout - prevents requests from hanging indefinitely + .setConnectionRequestTimeout(CONNECTION_TIMEOUT) // Timeout for getting connection from pool .build(); } @@ -185,14 +199,65 @@ private Certificate getCert(InputStream is) { } /** - * gets Json result of a IOFog Controller endpoint + * gets Json result of a IOFog Controller endpoint with request-level timeout protection + * This wraps the actual request in a Future with timeout to prevent hanging on DNS resolution, + * connection establishment, or response reading. * - * @param surl - endpoind to be called + * @param surl - endpoint to be called * @return result in Json format - * @throws AgentSystemException + * @throws AgentUserException */ - private JsonObject getJSON(String surl) throws AgentUserException { - logDebug(MODULE_NAME, "Start getJSON for result of a IOFog Controller endpoint"); + private JsonObject getJSON(String surl) throws AgentUserException { + return getJSONWithTimeout(surl); + } + + /** + * Internal method that wraps getJSONInternal in a Future with timeout protection + * + * @param surl - endpoint to be called + * @return result in Json format + * @throws AgentUserException + */ + private JsonObject getJSONWithTimeout(String surl) throws AgentUserException { + logDebug(MODULE_NAME, "Start getJSONWithTimeout for result of a IOFog Controller endpoint"); + + Future future = executorService.submit(() -> { + return getJSONInternal(surl); + }); + + try { + // Wait for the request with a total timeout that includes DNS resolution, connection, and read + return future.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS); + } catch (TimeoutException e) { + future.cancel(true); + 
logError(MODULE_NAME, "Request timeout after " + REQUEST_TIMEOUT_SECONDS + " seconds", + new AgentSystemException("Request timeout: " + surl, e)); + throw new AgentUserException("Request timeout after " + REQUEST_TIMEOUT_SECONDS + " seconds: " + surl, e); + } catch (java.util.concurrent.ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof AgentUserException) { + throw (AgentUserException) cause; + } else if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else { + throw new AgentUserException("Error executing request: " + surl, cause); + } + } catch (InterruptedException e) { + future.cancel(true); + Thread.currentThread().interrupt(); + throw new AgentUserException("Request interrupted: " + surl, e); + } + } + + /** + * Internal method that performs the actual HTTP request + * + * @param surl - endpoint to be called + * @return result in Json format + * @throws AgentUserException + */ + private JsonObject getJSONInternal(String surl) throws AgentUserException { + logDebug(MODULE_NAME, "Start getJSONInternal for result of a IOFog Controller endpoint"); // disable certificates for secure mode boolean secure = true; if (!surl.toLowerCase().startsWith("https")) { @@ -316,8 +381,67 @@ private StringBuilder createUri(String command) { private JsonObject getJsonObject(Map queryParams, RequestType requestType, HttpEntity httpEntity, StringBuilder uri) throws Exception { + return getJsonObjectWithTimeout(queryParams, requestType, httpEntity, uri); + } + + /** + * Internal method that wraps getJsonObjectInternal in a Future with timeout protection + * + * @param queryParams - query parameters + * @param requestType - HTTP request type + * @param httpEntity - HTTP entity (body) + * @param uri - request URI + * @return result in Json format + * @throws Exception + */ + private JsonObject getJsonObjectWithTimeout(Map queryParams, RequestType requestType, HttpEntity httpEntity, StringBuilder uri) throws Exception { + 
logDebug(MODULE_NAME, "Start getJsonObjectWithTimeout"); + + // Create a final copy of the URI for use in the Future + final StringBuilder finalUri = new StringBuilder(uri.toString()); + final Map finalQueryParams = queryParams; + final RequestType finalRequestType = requestType; + final HttpEntity finalHttpEntity = httpEntity; + + Future future = executorService.submit(() -> { + return getJsonObjectInternal(finalQueryParams, finalRequestType, finalHttpEntity, finalUri); + }); + + try { + // Wait for the request with a total timeout that includes DNS resolution, connection, and read + return future.get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS); + } catch (TimeoutException e) { + future.cancel(true); + logError(MODULE_NAME, "Request timeout after " + REQUEST_TIMEOUT_SECONDS + " seconds", + new AgentSystemException("Request timeout: " + finalUri.toString(), e)); + throw new AgentSystemException("Request timeout after " + REQUEST_TIMEOUT_SECONDS + " seconds: " + finalUri.toString(), e); + } catch (java.util.concurrent.ExecutionException e) { + Throwable cause = e.getCause(); + if (cause instanceof Exception) { + throw (Exception) cause; + } else { + throw new AgentSystemException("Error executing request: " + finalUri.toString(), cause); + } + } catch (InterruptedException e) { + future.cancel(true); + Thread.currentThread().interrupt(); + throw new AgentSystemException("Request interrupted: " + finalUri.toString(), e); + } + } + + /** + * Internal method that performs the actual HTTP request + * + * @param queryParams - query parameters + * @param requestType - HTTP request type + * @param httpEntity - HTTP entity (body) + * @param uri - request URI + * @return result in Json format + * @throws Exception + */ + private JsonObject getJsonObjectInternal(Map queryParams, RequestType requestType, HttpEntity httpEntity, StringBuilder uri) throws Exception { // disable certificates for secure mode - logDebug(MODULE_NAME, "Start get JsonObject"); + logDebug(MODULE_NAME, 
"Start get JsonObjectInternal"); boolean secure = true; if (!controllerUrl.toLowerCase().startsWith("https")) { if (Configuration.isSecureMode()) @@ -398,6 +522,7 @@ private JsonObject getJsonObject(Map queryParams, RequestType re case 400: throw new BadRequestException(errorMessage); case 401: + // TODO: Add retry logic with fresh token FieldAgent.getInstance().deProvision(true); logWarning(MODULE_NAME, "Invalid JWT token, switching controller status to Not provisioned"); throw new AuthenticationException(errorMessage); diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/Configuration.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/Configuration.java index 17fdfb9..306c6cc 100644 --- a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/Configuration.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/Configuration.java @@ -32,21 +32,13 @@ import org.eclipse.iofog.utils.functional.Pair; import org.eclipse.iofog.utils.logging.LoggingService; import org.eclipse.iofog.gps.GpsManager; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.xml.sax.SAXException; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.transform.OutputKeys; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.representer.Representer; +import org.yaml.snakeyaml.nodes.Tag; import java.io.File; +import java.io.FileInputStream; +import java.io.FileWriter; import java.io.IOException; import java.net.InetAddress; import java.net.NetworkInterface; @@ -77,10 +69,7 @@ public final 
class Configuration { private static final String MODULE_NAME = "Configuration"; - private static Element configElement; - private static Document configFile; - private static Element configSwitcherElement; - private static Document configSwitcherFile; + private static YamlConfig yamlConfig; private static ConfigSwitcherState currentSwitcherState; //Directly configurable params // private static String accessToken; @@ -109,7 +98,7 @@ public final class Configuration { private static long gpsScanFrequency; private static String gpsCoordinates; private static GpsMode gpsMode; - private static ArchitectureType fogType; + private static ArchitectureType arch; private static final Map defaultConfig; private static boolean secureMode; private static String ipAddressExternal; @@ -117,7 +106,7 @@ public final class Configuration { private static long availableDiskThreshold; private static int readyToUpgradeScanFrequency; private static String timeZone; - + private static String namespace; public static boolean debugging = false; @@ -182,7 +171,7 @@ public static void setRouterPort(int routerPort) { private static void updateAutomaticConfigParams() { LoggingService.logInfo(MODULE_NAME, "Start update Automatic ConfigParams "); - switch (fogType) { + switch (arch) { case ARM: statusReportFreqSeconds = 5; // pingControllerFreqSeconds = 30; @@ -278,7 +267,7 @@ public static String getHwSignature() { public static void setHwSignature(String hwSignature) { Configuration.hwSignature = hwSignature; try { - setNode(HW_SIGNATURE, hwSignature, configFile, configElement); + setNode(HW_SIGNATURE, hwSignature); } catch (ConfigurationItemException e) { LoggingService.logError(MODULE_NAME, "Failed to set hardware signature in config", e); } @@ -287,7 +276,7 @@ public static void setHwSignature(String hwSignature) { public static void clearHwSignature() { Configuration.hwSignature = null; try { - setNode(HW_SIGNATURE, null, configFile, configElement); + setNode(HW_SIGNATURE, null); } catch 
(ConfigurationItemException e) { LoggingService.logError(MODULE_NAME, "Failed to clear hardware signature in config", e); } @@ -366,12 +355,12 @@ public static void setPostDiagnosticsFreq(int postDiagnosticsFreq) { Configuration.postDiagnosticsFreq = postDiagnosticsFreq; } - public static ArchitectureType getFogType() { - return fogType; + public static ArchitectureType getArch() { + return arch; } - public static void setFogType(ArchitectureType fogType) { - Configuration.fogType = fogType; + public static void setArch(ArchitectureType arch) { + Configuration.arch = arch; } public static boolean isSecureMode() { @@ -383,76 +372,85 @@ public static void setSecureMode(boolean secureMode) { } /** - * return XML node value - * - * @param param - node name - * @return node value - * @throws ConfigurationItemException + * Converts snake_case (XML tag format) to camelCase (YAML property format) + * + * @param snakeCase - snake_case string + * @return camelCase string */ - private static String getNode(CommandLineConfigParam param, Document document) { + private static String snakeToCamel(String snakeCase) { + if (snakeCase == null || !snakeCase.contains("_")) { + return snakeCase; + } + String[] parts = snakeCase.split("_"); + StringBuilder camelCase = new StringBuilder(parts[0]); + for (int i = 1; i < parts.length; i++) { + if (parts[i].length() > 0) { + camelCase.append(Character.toUpperCase(parts[i].charAt(0))); + if (parts[i].length() > 1) { + camelCase.append(parts[i].substring(1)); + } + } + } + return camelCase.toString(); + } - Supplier nodeReader = () -> { + /** + * return YAML config value + * + * @param param - config parameter + * @return config value + */ + private static String getNode(CommandLineConfigParam param) { + Supplier valueReader = () -> { String res = null; try { - res = getFirstNodeByTagName(param.getXmlTag(), document).getTextContent(); - } catch (ConfigurationItemException e) { - LoggingService.logError(MODULE_NAME, "Error getting node", e); - 
System.out.println("[" + MODULE_NAME + "] <" + param.getXmlTag() + "> " - + " item not found or defined more than once. Default value - " + param.getDefaultValue() + " will be used"); - - }catch (Exception e) { - LoggingService.logError(MODULE_NAME, "Error getting node", e); + if (yamlConfig == null || yamlConfig.getCurrentProfile() == null) { + return param.getDefaultValue(); + } + ProfileConfig currentProfile = yamlConfig.getProfile(yamlConfig.getCurrentProfile()); + if (currentProfile == null) { + return param.getDefaultValue(); + } + // Convert XML tag (snake_case) to YAML property (camelCase) + String yamlKey = snakeToCamel(param.getXmlTag()); + res = currentProfile.getProperty(yamlKey); + if (res == null || res.isEmpty()) { + res = param.getDefaultValue(); + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error getting config value", e); System.out.println("[" + MODULE_NAME + "] <" + param.getXmlTag() + "> " - + " item not found or defined more than once. Default value - " + param.getDefaultValue() + " will be used"); + + " item not found. Default value - " + param.getDefaultValue() + " will be used"); } return res; }; - return Optional.ofNullable(nodeReader.get()). + return Optional.ofNullable(valueReader.get()). 
orElseGet(param::getDefaultValue); } /** - * sets XML node value + * sets YAML config value * - * @param param - node param - * @param content - node value + * @param param - config param + * @param content - config value * @throws ConfigurationItemException */ - private static void setNode(CommandLineConfigParam param, String content, Document document, Element node) throws ConfigurationItemException { - LoggingService.logDebug(MODULE_NAME, "Start Setting node : " + param.getCommandName()); - createNodeIfNotExists(param.getXmlTag(), document, node); - getFirstNodeByTagName(param.getXmlTag(), document).setTextContent(content); - LoggingService.logDebug(MODULE_NAME, "Finished Setting node : " + param.getCommandName()); - } - - private static void createNodeIfNotExists(String name, Document document, Element node) { - LoggingService.logDebug(MODULE_NAME, "Start create Node IfNotExists : " + name); - NodeList nodes = node.getElementsByTagName(name); - if (nodes.getLength() == 0) { - node.appendChild(document.createElement(name)); + private static void setNode(CommandLineConfigParam param, String content) throws ConfigurationItemException { + LoggingService.logDebug(MODULE_NAME, "Start Setting config value : " + param.getCommandName()); + if (yamlConfig == null || yamlConfig.getCurrentProfile() == null) { + throw new ConfigurationItemException("Configuration not loaded"); } - LoggingService.logDebug(MODULE_NAME, "Finished create Node IfNotExists : " + name); - } - - /** - * return first XML node from list of nodes found based on provided tag name - * - * @param name - node name - * @return Node object - * @throws ConfigurationItemException - */ - private static Node getFirstNodeByTagName(String name, Document document) throws ConfigurationItemException { - LoggingService.logDebug(MODULE_NAME, "Start get First Node By TagName : " + name); - NodeList nodes = document.getElementsByTagName(name); - - if (nodes.getLength() != 1) { - throw new ConfigurationItemException("<" 
+ name + "> item not found or defined more than once"); + ProfileConfig currentProfile = yamlConfig.getProfile(yamlConfig.getCurrentProfile()); + if (currentProfile == null) { + throw new ConfigurationItemException("Current profile not found: " + yamlConfig.getCurrentProfile()); } - LoggingService.logDebug(MODULE_NAME, "Finished get First Node By TagName : " + name); - return nodes.item(0); + // Convert XML tag (snake_case) to YAML property (camelCase) + String yamlKey = snakeToCamel(param.getXmlTag()); + currentProfile.setProperty(yamlKey, content); + LoggingService.logDebug(MODULE_NAME, "Finished Setting config value : " + param.getCommandName()); } - public static HashMap getOldNodeValuesForParameters(Set parameters, Document document) throws ConfigurationItemException { + public static HashMap getOldNodeValuesForParameters(Set parameters) throws ConfigurationItemException { LoggingService.logDebug(MODULE_NAME, "Start get Old Node Values For Parameters : "); @@ -461,7 +459,7 @@ public static HashMap getOldNodeValuesForParameters(Set for (String option : parameters) { CommandLineConfigParam cmdOption = getCommandByName(option) .orElseThrow(() -> new ConfigurationItemException("Invalid parameter -" + option)); - result.put(cmdOption.getCommandName(), getNode(cmdOption, document)); + result.put(cmdOption.getCommandName(), getNode(cmdOption)); } LoggingService.logDebug(MODULE_NAME, "Finished get Old Node Values For Parameters : "); @@ -486,7 +484,7 @@ public static void saveConfigUpdates() throws Exception { EdgeGuardManager.getInstance().changeEdgeGuardFreqInterval(); // LoggingService.instanceConfigUpdated(); - updateConfigFile(getCurrentConfigPath(), configFile); + updateConfigFile(getCurrentConfigPath()); LoggingService.logInfo(MODULE_NAME, "Finished updating agent configurations"); } @@ -498,26 +496,40 @@ public static void saveGpsConfigUpdates() throws Exception { public static void updateConfigBackUpFile() { try { - updateConfigFile(getBackUpConfigPath(), 
configFile); + updateConfigFile(getBackUpConfigPath()); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error saving backup config File", e); } } /** - * saves configuration data to config.xml + * saves configuration data to config.yaml * * @throws Exception */ - private static void updateConfigFile(String filePath, Document newFile) throws Exception { + private static void updateConfigFile(String filePath) throws Exception { try { - LoggingService.logInfo(MODULE_NAME, "Start updating configuration data to config.xml"); - Transformer transformer = TransformerFactory.newInstance().newTransformer(); - transformer.setOutputProperty(OutputKeys.INDENT, "yes"); - StreamResult result = new StreamResult(new File(filePath)); - DOMSource source = new DOMSource(newFile); - transformer.transform(source, result); - LoggingService.logInfo(MODULE_NAME, "Finished saving configuration data to config.xml"); + LoggingService.logInfo(MODULE_NAME, "Start updating configuration data to config.yaml"); + if (yamlConfig == null) { + throw new ConfigurationItemException("Configuration not loaded"); + } + + DumperOptions options = new DumperOptions(); + options.setIndent(2); + options.setPrettyFlow(true); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + + Representer representer = new Representer(options); + representer.getPropertyUtils().setSkipMissingProperties(true); + // Map custom classes to Tag.MAP to prevent writing class tags + representer.addClassTag(YamlConfig.class, Tag.MAP); + representer.addClassTag(ProfileConfig.class, Tag.MAP); + + Yaml yaml = new Yaml(representer, options); + try (FileWriter writer = new FileWriter(filePath)) { + yaml.dump(yamlConfig, writer); + } + LoggingService.logInfo(MODULE_NAME, "Finished saving configuration data to config.yaml"); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error saving config File", e); throw new AgentSystemException("Error updating config file : "+ filePath, e); @@ -567,14 +579,14 @@ 
public static HashMap setConfig(Map commandLineM break; } setDiskLimit(Float.parseFloat(value)); - setNode(DISK_CONSUMPTION_LIMIT, value, configFile, configElement); + setNode(DISK_CONSUMPTION_LIMIT, value); break; case DISK_DIRECTORY: LoggingService.logInfo(MODULE_NAME, "Setting disk directory"); value = addSeparator(value); setDiskDirectory(value); - setNode(DISK_DIRECTORY, value, configFile, configElement); + setNode(DISK_DIRECTORY, value); break; case MEMORY_CONSUMPTION_LIMIT: LoggingService.logInfo(MODULE_NAME, "Setting memory consumption limit"); @@ -589,7 +601,7 @@ public static HashMap setConfig(Map commandLineM break; } setMemoryLimit(Float.parseFloat(value)); - setNode(MEMORY_CONSUMPTION_LIMIT, value, configFile, configElement); + setNode(MEMORY_CONSUMPTION_LIMIT, value); break; case PROCESSOR_CONSUMPTION_LIMIT: LoggingService.logInfo(MODULE_NAME, "Setting processor consumption limit"); @@ -604,22 +616,22 @@ public static HashMap setConfig(Map commandLineM break; } setCpuLimit(Float.parseFloat(value)); - setNode(PROCESSOR_CONSUMPTION_LIMIT, value, configFile, configElement); + setNode(PROCESSOR_CONSUMPTION_LIMIT, value); break; case CONTROLLER_URL: LoggingService.logInfo(MODULE_NAME, "Setting controller url"); - setNode(CONTROLLER_URL, value, configFile, configElement); + setNode(CONTROLLER_URL, value); setControllerUrl(value); break; case CONTROLLER_CERT: LoggingService.logInfo(MODULE_NAME, "Setting controller cert"); - setNode(CONTROLLER_CERT, value, configFile, configElement); + setNode(CONTROLLER_CERT, value); setControllerCert(value); break; case DOCKER_URL: LoggingService.logInfo(MODULE_NAME, "Setting docker url"); if (value.startsWith("tcp://") || value.startsWith("unix://")) { - setNode(DOCKER_URL, value, configFile, configElement); + setNode(DOCKER_URL, value); setDockerUrl(value); } else { messageMap.put(option, "Unsupported protocol scheme. 
Only 'tcp://' or 'unix://' supported.\n"); @@ -629,7 +641,7 @@ public static HashMap setConfig(Map commandLineM case NETWORK_INTERFACE: LoggingService.logInfo(MODULE_NAME, "Setting disk network interface"); if (defaults || isValidNetworkInterface(value.trim())) { - setNode(NETWORK_INTERFACE, value, configFile, configElement); + setNode(NETWORK_INTERFACE, value); setNetworkInterface(value); IOFogNetworkInterfaceManager.getInstance().updateIOFogNetworkInterface(); } else { @@ -649,14 +661,14 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Log disk limit range must be 0.5 to 100 GB"); break; } - setNode(LOG_DISK_CONSUMPTION_LIMIT, value, configFile, configElement); + setNode(LOG_DISK_CONSUMPTION_LIMIT, value); setLogDiskLimit(Float.parseFloat(value)); updateLogger = true; break; case LOG_DISK_DIRECTORY: LoggingService.logInfo(MODULE_NAME, "Setting log disk directory"); value = addSeparator(value); - setNode(LOG_DISK_DIRECTORY, value, configFile, configElement); + setNode(LOG_DISK_DIRECTORY, value); setLogDiskDirectory(value); updateLogger = true; break; @@ -672,7 +684,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Log file count range must be 1 to 100"); break; } - setNode(LOG_FILE_COUNT, value, configFile, configElement); + setNode(LOG_FILE_COUNT, value); setLogFileCount(Integer.parseInt(value)); updateLogger = true; break; @@ -684,7 +696,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Option -" + option + " has invalid value: " + value); break; } - setNode(LOG_LEVEL, value.toUpperCase(), configFile, configElement); + setNode(LOG_LEVEL, value.toUpperCase()); setLogLevel(value.toUpperCase()); updateLogger = true; break; @@ -700,7 +712,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Status update frequency must be greater than 1"); break; } - setNode(STATUS_FREQUENCY, value, configFile, configElement); + setNode(STATUS_FREQUENCY, value); 
setStatusFrequency(Integer.parseInt(value)); break; case CHANGE_FREQUENCY: @@ -715,7 +727,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Get changes frequency must be greater than 1"); break; } - setNode(CHANGE_FREQUENCY, value, configFile, configElement); + setNode(CHANGE_FREQUENCY, value); setChangeFrequency(Integer.parseInt(value)); break; case DEVICE_SCAN_FREQUENCY: @@ -730,7 +742,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Get scan devices frequency must be greater than 1"); break; } - setNode(DEVICE_SCAN_FREQUENCY, value, configFile, configElement); + setNode(DEVICE_SCAN_FREQUENCY, value); setDeviceScanFrequency(Integer.parseInt(value)); break; case POST_DIAGNOSTICS_FREQ: @@ -745,7 +757,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Post diagnostics frequency must be greater than 1"); break; } - setNode(POST_DIAGNOSTICS_FREQ, value, configFile, configElement); + setNode(POST_DIAGNOSTICS_FREQ, value); setPostDiagnosticsFreq(Integer.parseInt(value)); break; case WATCHDOG_ENABLED: @@ -754,7 +766,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Option -" + option + " has invalid value: " + value); break; } - setNode(WATCHDOG_ENABLED, value, configFile, configElement); + setNode(WATCHDOG_ENABLED, value); setWatchdogEnabled(!value.equals("off")); break; case EDGE_GUARD_FREQUENCY: @@ -769,12 +781,12 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Edge guard frequency must be positive value"); break; } - setNode(EDGE_GUARD_FREQUENCY, value, configFile, configElement); + setNode(EDGE_GUARD_FREQUENCY, value); setEdgeGuardFrequency(longValue); break; case GPS_DEVICE: LoggingService.logInfo(MODULE_NAME, "Setting gps device"); - setNode(GPS_DEVICE, value, configFile, configElement); + setNode(GPS_DEVICE, value); setGpsDevice(value); break; case GPS_SCAN_FREQUENCY: @@ -786,11 +798,19 @@ public static HashMap setConfig(Map 
commandLineM break; } if (longValue < 0) { - messageMap.put(option, "Gps scan frequency must be positive value"); + messageMap.put(option, "Gps scan frequency must be 0 or positive value (0 disables scheduler)"); break; } - setNode(GPS_SCAN_FREQUENCY, value, configFile, configElement); + setNode(GPS_SCAN_FREQUENCY, value); setGpsScanFrequency(longValue); + // Notify GPS Manager to update scheduler frequency + CompletableFuture.runAsync(() -> { + try { + GpsManager.getInstance().changeGpsScanFrequencyInterval(); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error updating GPS scan frequency interval", e); + } + }); break; case GPS_MODE: LoggingService.logInfo(MODULE_NAME, "Setting gps mode"); @@ -814,11 +834,11 @@ public static HashMap setConfig(Map commandLineM break; } break; - case FOG_TYPE: - LoggingService.logInfo(MODULE_NAME, "Setting fogtype"); + case ARCH: + LoggingService.logInfo(MODULE_NAME, "Setting arch"); try { - configureFogType(value); - setNode(FOG_TYPE, value, configFile, configElement); + configureArch(value); + setNode(ARCH, value); } catch (ConfigurationItemException e){ messageMap.put(option, "Option -" + option + " has invalid value: " + value); break; @@ -826,7 +846,7 @@ public static HashMap setConfig(Map commandLineM break; case SECURE_MODE: LoggingService.logInfo(MODULE_NAME, "Setting secure mode"); - setNode(SECURE_MODE, value, configFile, configElement); + setNode(SECURE_MODE, value); setSecureMode(!value.equals("off")); break; case ROUTER_HOST: @@ -849,7 +869,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Docker pruning frequency must be positive value"); break; } - setNode(DOCKER_PRUNING_FREQUENCY, value, configFile, configElement); + setNode(DOCKER_PRUNING_FREQUENCY, value); setDockerPruningFrequency(Long.parseLong(value)); break; case AVAILABLE_DISK_THRESHOLD: @@ -864,7 +884,7 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "Available disk threshold must be 
greater than 1"); break; } - setNode(AVAILABLE_DISK_THRESHOLD, value, configFile, configElement); + setNode(AVAILABLE_DISK_THRESHOLD, value); setAvailableDiskThreshold(Long.parseLong(value)); break; case READY_TO_UPGRADE_SCAN_FREQUENCY: @@ -879,13 +899,13 @@ public static HashMap setConfig(Map commandLineM messageMap.put(option, "isReadyToUpgrade scan frequency must be greater than 1"); break; } - setNode(READY_TO_UPGRADE_SCAN_FREQUENCY, value, configFile, configElement); + setNode(READY_TO_UPGRADE_SCAN_FREQUENCY, value); setReadyToUpgradeScanFrequency(Integer.parseInt(value)); FieldAgent.getInstance().changeReadInterval(); break; case DEV_MODE: LoggingService.logInfo(MODULE_NAME, "Setting dev mode"); - setNode(DEV_MODE, value, configFile, configElement); + setNode(DEV_MODE, value); setDevMode(!value.equals("off")); break; case TIME_ZONE: @@ -904,10 +924,6 @@ public static HashMap setConfig(Map commandLineM LoggingService.logInfo(MODULE_NAME, "Setting TLS key"); setTlsKey(value); break; - // case PRIVATE_KEY: - // LoggingService.logInfo(MODULE_NAME, "Setting privateKey"); - // setPrivateKey(value); - // break; default: throw new ConfigurationItemException("Invalid parameter -" + option); } @@ -925,7 +941,7 @@ public static HashMap setConfig(Map commandLineM throw e; } finally { if (configUpdateError) { - updateConfigFile(getBackUpConfigPath(), configFile); + updateConfigFile(getBackUpConfigPath()); } } } else { @@ -940,36 +956,36 @@ public static HashMap setConfig(Map commandLineM } /** - * Configures fogType. + * Configures arch. 
* - * @param fogTypeCommand could be "auto" or string that matches one of the {@link ArchitectureType} patterns + * @param archCommand could be "auto" or string that matches one of the {@link ArchitectureType} patterns * @throws ConfigurationItemException if {@link ArchitectureType} undefined */ - private static void configureFogType(String fogTypeCommand) throws ConfigurationItemException { - LoggingService.logInfo(MODULE_NAME, "Start configure FogType "); - ArchitectureType newFogType = ArchitectureType.UNDEFINED; - switch (fogTypeCommand) { + private static void configureArch(String archCommand) throws ConfigurationItemException { + LoggingService.logInfo(MODULE_NAME, "Start configure Arch "); + ArchitectureType newArch = ArchitectureType.UNDEFINED; + switch (archCommand) { case "auto": { - newFogType = ArchitectureType.getArchTypeByArchName(System.getProperty("os.arch")); + newArch = ArchitectureType.getArchTypeByArchName(System.getProperty("os.arch")); break; } case "intel_amd": { - newFogType = ArchitectureType.INTEL_AMD; + newArch = ArchitectureType.INTEL_AMD; break; } case "arm": { - newFogType = ArchitectureType.ARM; + newArch = ArchitectureType.ARM; break; } } - if (newFogType == ArchitectureType.UNDEFINED) { - throw new ConfigurationItemException("Couldn't autodetect fogType or unknown fogType type was set."); + if (newArch == ArchitectureType.UNDEFINED) { + throw new ConfigurationItemException("Couldn't autodetect arch or unknown arch type was set."); } - setFogType(newFogType); + setArch(newArch); updateAutomaticConfigParams(); - LoggingService.logInfo(MODULE_NAME, "Finished configure FogType : " + newFogType); + LoggingService.logInfo(MODULE_NAME, "Finished configure Arch : " + newArch); } /** @@ -993,6 +1009,9 @@ private static void configureGps(String gpsModeCommand, String gpsCoordinatesCom } else if (GpsMode.OFF.name().toLowerCase().equals(gpsModeCommand)) { gpsCoordinates = ""; currentMode = GpsMode.OFF; + // Automatically set GPS scan frequency 
to 0 when mode is OFF + setGpsScanFrequency(0); + LoggingService.logDebug(MODULE_NAME, "GPS mode set to OFF - GPS scan frequency automatically set to 0"); } else if (GpsMode.DYNAMIC.name().toLowerCase().equals(gpsModeCommand)) { gpsCoordinates = ""; currentMode = GpsMode.DYNAMIC; @@ -1034,8 +1053,8 @@ public static void writeGpsToConfigFile() throws ConfigurationItemException { LoggingService.logDebug(MODULE_NAME, "Start writing GPS coordinates and GPS mode to config file"); - setNode(GPS_MODE, gpsMode.name().toLowerCase(), configFile, configElement); - setNode(GPS_COORDINATES, gpsCoordinates, configFile, configElement); + setNode(GPS_MODE, gpsMode.name().toLowerCase()); + setNode(GPS_COORDINATES, gpsCoordinates); LoggingService.logDebug(MODULE_NAME, "Finished writing GPS coordinates and GPS mode to config file"); } @@ -1119,82 +1138,108 @@ private static String addSeparator(String value) { /** - * loads configuration from config.xml file + * loads configuration from config.yaml file * * @throws ConfigurationItemException */ public static void loadConfig() throws ConfigurationItemException { LoggingService.logInfo(MODULE_NAME, "Start load Config"); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = null; boolean isConfigError = false; - try { - builder = factory.newDocumentBuilder(); - } catch (ParserConfigurationException e) { - LoggingService.logError(MODULE_NAME, "Error while parsing config xml", new ConfigurationItemException(e.getMessage(), e)); - throw new ConfigurationItemException(e.getMessage(), e); - } - - try { - configFile = builder.parse(getCurrentConfigPath()); - } catch (Exception e) { + Yaml yaml = new Yaml(); + + try (FileInputStream inputStream = new FileInputStream(getCurrentConfigPath())) { + yamlConfig = yaml.loadAs(inputStream, YamlConfig.class); + if (yamlConfig == null) { + throw new ConfigurationItemException("Failed to load YAML configuration"); + } + } catch (Exception e) { isConfigError = 
true; - LoggingService.logError(MODULE_NAME, "Error while parsing config xml", new ConfigurationItemException("Error while parsing config xml", e)); - } + LoggingService.logError(MODULE_NAME, "Error while parsing config yaml", new ConfigurationItemException("Error while parsing config yaml", e)); + } + if (isConfigError) { - try { - configFile = builder.parse(getBackUpConfigPath()); + try (FileInputStream inputStream = new FileInputStream(getBackUpConfigPath())) { + yamlConfig = yaml.loadAs(inputStream, YamlConfig.class); + if (yamlConfig == null) { + throw new ConfigurationItemException("Failed to load backup YAML configuration"); + } } catch (Exception e) { - LoggingService.logError(MODULE_NAME, "Error while parsing backup config xml", new ConfigurationItemException("Error while parsing config xml", e)); - throw new ConfigurationItemException("Error while parsing config xml and backup config xml"); + LoggingService.logError(MODULE_NAME, "Error while parsing backup config yaml", new ConfigurationItemException("Error while parsing config yaml", e)); + throw new ConfigurationItemException("Error while parsing config yaml and backup config yaml"); } } - configFile.getDocumentElement().normalize(); - - configElement = (Element) getFirstNodeByTagName("config", configFile); - - setIofogUuid(getNode(IOFOG_UUID, configFile)); - setPrivateKey(getNode(PRIVATE_KEY, configFile)); - setControllerUrl(getNode(CONTROLLER_URL, configFile)); - setControllerCert(getNode(CONTROLLER_CERT, configFile)); - setNetworkInterface(getNode(NETWORK_INTERFACE, configFile)); - setDockerUrl(getNode(DOCKER_URL, configFile)); - setDiskLimit(Float.parseFloat(getNode(DISK_CONSUMPTION_LIMIT, configFile))); - setDiskDirectory(getNode(DISK_DIRECTORY, configFile)); - setMemoryLimit(Float.parseFloat(getNode(MEMORY_CONSUMPTION_LIMIT, configFile))); - setCpuLimit(Float.parseFloat(getNode(PROCESSOR_CONSUMPTION_LIMIT, configFile))); - setLogDiskDirectory(getNode(LOG_DISK_DIRECTORY, configFile)); - 
setLogDiskLimit(Float.parseFloat(getNode(LOG_DISK_CONSUMPTION_LIMIT, configFile))); - setLogFileCount(Integer.parseInt(getNode(LOG_FILE_COUNT, configFile))); - setLogLevel(getNode(LOG_LEVEL, configFile)); - setGpsDevice(getNode(GPS_DEVICE, configFile)); - setGpsScanFrequency(Long.parseLong(getNode(GPS_SCAN_FREQUENCY, configFile))); - configureGps(getNode(GPS_MODE, configFile), getNode(GPS_COORDINATES, configFile)); - setChangeFrequency(Integer.parseInt(getNode(CHANGE_FREQUENCY, configFile))); - setDeviceScanFrequency(Integer.parseInt(getNode(DEVICE_SCAN_FREQUENCY, configFile))); - setStatusFrequency(Integer.parseInt(getNode(STATUS_FREQUENCY, configFile))); - setPostDiagnosticsFreq(Integer.parseInt(getNode(POST_DIAGNOSTICS_FREQ, configFile))); - setWatchdogEnabled(!getNode(WATCHDOG_ENABLED, configFile).equals("off")); - setEdgeGuardFrequency(Long.parseLong(getNode(EDGE_GUARD_FREQUENCY, configFile))); - configureFogType(getNode(FOG_TYPE, configFile)); - setSecureMode(!getNode(SECURE_MODE, configFile).equals("off")); - setIpAddressExternal(GpsWebHandler.getExternalIp()); - setRouterHost(getNode(ROUTER_HOST, configFile)); - setRouterPort(!getNode(ROUTER_PORT, configFile).equals("") ? 
Integer.parseInt(getNode(ROUTER_PORT, configFile)) : 0); - - setDockerPruningFrequency(Long.parseLong(getNode(DOCKER_PRUNING_FREQUENCY, configFile))); - setAvailableDiskThreshold(Long.parseLong(getNode(AVAILABLE_DISK_THRESHOLD, configFile))); - setReadyToUpgradeScanFrequency(Integer.parseInt(getNode(READY_TO_UPGRADE_SCAN_FREQUENCY, configFile))); - setDevMode(!getNode(DEV_MODE, configFile).equals("off")); - configureTimeZone(getNode(TIME_ZONE, configFile)); - setCaCert(getNode(CA_CERT, configFile)); - setTlsCert(getNode(TLS_CERT, configFile)); - setTlsKey(getNode(TLS_KEY, configFile)); + // Validate and set current profile + String currentProfileStr = yamlConfig.getCurrentProfile(); + if (currentProfileStr == null || currentProfileStr.isEmpty()) { + currentProfileStr = ConfigSwitcherState.DEFAULT.fullValue(); + yamlConfig.setCurrentProfile(currentProfileStr); + } + + try { + currentSwitcherState = ConfigSwitcherState.parse(currentProfileStr); + } catch (IllegalArgumentException e) { + LoggingService.logError(MODULE_NAME, "Error while reading current profile state, using default config", + new ConfigurationItemException(e.getMessage(), e)); + currentSwitcherState = ConfigSwitcherState.DEFAULT; + yamlConfig.setCurrentProfile(ConfigSwitcherState.DEFAULT.fullValue()); + } + // Ensure current profile exists + if (!yamlConfig.getProfiles().containsKey(currentProfileStr)) { + LoggingService.logWarning(MODULE_NAME, "Current profile not found: " + currentProfileStr + ", using default"); + currentSwitcherState = ConfigSwitcherState.DEFAULT; + yamlConfig.setCurrentProfile(ConfigSwitcherState.DEFAULT.fullValue()); + } + + ProfileConfig currentProfile = yamlConfig.getProfile(yamlConfig.getCurrentProfile()); + if (currentProfile == null) { + throw new ConfigurationItemException("Current profile configuration not found: " + yamlConfig.getCurrentProfile()); + } + + // Load all configuration values from current profile + setIofogUuid(getNode(IOFOG_UUID)); + 
setPrivateKey(getNode(PRIVATE_KEY)); + setControllerUrl(getNode(CONTROLLER_URL)); + setControllerCert(getNode(CONTROLLER_CERT)); + setNetworkInterface(getNode(NETWORK_INTERFACE)); + setDockerUrl(getNode(DOCKER_URL)); + setDiskLimit(Float.parseFloat(getNode(DISK_CONSUMPTION_LIMIT))); + setDiskDirectory(getNode(DISK_DIRECTORY)); + setMemoryLimit(Float.parseFloat(getNode(MEMORY_CONSUMPTION_LIMIT))); + setCpuLimit(Float.parseFloat(getNode(PROCESSOR_CONSUMPTION_LIMIT))); + setLogDiskDirectory(getNode(LOG_DISK_DIRECTORY)); + setLogDiskLimit(Float.parseFloat(getNode(LOG_DISK_CONSUMPTION_LIMIT))); + setLogFileCount(Integer.parseInt(getNode(LOG_FILE_COUNT))); + setLogLevel(getNode(LOG_LEVEL)); + setGpsDevice(getNode(GPS_DEVICE)); + setGpsScanFrequency(Long.parseLong(getNode(GPS_SCAN_FREQUENCY))); + configureGps(getNode(GPS_MODE), getNode(GPS_COORDINATES)); + setChangeFrequency(Integer.parseInt(getNode(CHANGE_FREQUENCY))); + setDeviceScanFrequency(Integer.parseInt(getNode(DEVICE_SCAN_FREQUENCY))); + setStatusFrequency(Integer.parseInt(getNode(STATUS_FREQUENCY))); + setPostDiagnosticsFreq(Integer.parseInt(getNode(POST_DIAGNOSTICS_FREQ))); + setWatchdogEnabled(!getNode(WATCHDOG_ENABLED).equals("off")); + setEdgeGuardFrequency(Long.parseLong(getNode(EDGE_GUARD_FREQUENCY))); + configureArch(getNode(ARCH)); + setSecureMode(!getNode(SECURE_MODE).equals("off")); + setIpAddressExternal(GpsWebHandler.getExternalIp()); + setRouterHost(getNode(ROUTER_HOST)); + setRouterPort(!getNode(ROUTER_PORT).equals("") ? 
Integer.parseInt(getNode(ROUTER_PORT)) : 0); + + setDockerPruningFrequency(Long.parseLong(getNode(DOCKER_PRUNING_FREQUENCY))); + setAvailableDiskThreshold(Long.parseLong(getNode(AVAILABLE_DISK_THRESHOLD))); + setReadyToUpgradeScanFrequency(Integer.parseInt(getNode(READY_TO_UPGRADE_SCAN_FREQUENCY))); + setDevMode(!getNode(DEV_MODE).equals("off")); + configureTimeZone(getNode(TIME_ZONE)); + setCaCert(getNode(CA_CERT)); + setTlsCert(getNode(TLS_CERT)); + setTlsKey(getNode(TLS_KEY)); + setNamespace(getNode(NAMESPACE)); + try { - updateConfigFile(getCurrentConfigPath(), configFile); + updateConfigFile(getCurrentConfigPath()); } catch (Exception e) { try { LoggingService.logError(MODULE_NAME, "Error saving config", e); @@ -1203,7 +1248,7 @@ public static void loadConfig() throws ConfigurationItemException { } } finally { try { - updateConfigFile(getBackUpConfigPath(), configFile); + updateConfigFile(getBackUpConfigPath()); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error saving config back up file", e); } @@ -1212,55 +1257,15 @@ public static void loadConfig() throws ConfigurationItemException { } /** - * loads configuration about current config from config-switcher.xml + * loads configuration about current profile from config.yaml + * This method is now merged into loadConfig() but kept for backward compatibility * * @throws ConfigurationItemException */ public static void loadConfigSwitcher() throws ConfigurationItemException { - LoggingService.logInfo(MODULE_NAME, "Start loads configuration about current config from config-switcher.xml"); - - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = null; - try { - builder = factory.newDocumentBuilder(); - } catch (ParserConfigurationException e) { - LoggingService.logError(MODULE_NAME, "Error while parsing config switcher xml", e); - throw new ConfigurationItemException(e.getMessage(), e); - } - - try { - configSwitcherFile = 
builder.parse(CONFIG_SWITCHER_PATH); - } catch (SAXException e) { - LoggingService.logError(MODULE_NAME, "Error while parsing config switcher xml", - new ConfigurationItemException(e.getMessage(), e)); - throw new ConfigurationItemException(e.getMessage(), e); - } catch (IOException e) { - LoggingService.logError(MODULE_NAME, "Error while parsing config switcher xml", - new ConfigurationItemException(e.getMessage(), e)); - throw new ConfigurationItemException(e.getMessage(), e); - } - configSwitcherFile.getDocumentElement().normalize(); - - configSwitcherElement = (Element) getFirstNodeByTagName(SWITCHER_ELEMENT, configSwitcherFile); - - verifySwitcherNode(SWITCHER_NODE, ConfigSwitcherState.DEFAULT.fullValue()); - LoggingService.logInfo(MODULE_NAME, "Finished loading configuration about current config from config-switcher.xml"); - } - - // this code will be triggered in case of iofog updated (not newly installed) and add new option for config - private static void createConfigProperty(CommandLineConfigParam cmdParam) throws Exception { - LoggingService.logDebug(MODULE_NAME, "Start create config property"); - // TODO: add appropriate handling of case when 0 nodes found or multiple before adding new property to file - Element el = configFile.createElement(cmdParam.getXmlTag()); - el.appendChild(configFile.createTextNode(cmdParam.getDefaultValue())); - configElement.appendChild(el); - - DOMSource source = new DOMSource(configFile); - TransformerFactory transformerFactory = TransformerFactory.newInstance(); - Transformer transformer = transformerFactory.newTransformer(); - StreamResult result = new StreamResult(getCurrentConfigPath()); - transformer.transform(source, result); - LoggingService.logDebug(MODULE_NAME, "Finished create config property"); + LoggingService.logInfo(MODULE_NAME, "Start loads configuration about current profile from config.yaml"); + // Profile loading is now handled in loadConfig() + LoggingService.logInfo(MODULE_NAME, "Finished loading 
configuration about current profile from config.yaml"); } // public static String getAccessToken() { @@ -1327,40 +1332,18 @@ public static void setLogDiskDirectory(String logDiskDirectory) { // public static void setAccessToken(String accessToken) throws ConfigurationItemException { // LoggingService.logDebug(MODULE_NAME, "Start set access token"); - // setNode(ACCESS_TOKEN, accessToken, configFile, configElement); + // setNode(ACCESS_TOKEN, accessToken); // Configuration.accessToken = accessToken; // LoggingService.logDebug(MODULE_NAME, "Finished set access token"); // } public static void setIofogUuid(String iofogUuid) throws ConfigurationItemException { LoggingService.logDebug(MODULE_NAME, "Start set Iofog uuid"); - setNode(IOFOG_UUID, iofogUuid, configFile, configElement); + setNode(IOFOG_UUID, iofogUuid); Configuration.iofogUuid = iofogUuid; LoggingService.logDebug(MODULE_NAME, "Finished set Iofog uuid"); } - private static void verifySwitcherNode(String switcher, String defaultValue) throws ConfigurationItemException { - LoggingService.logInfo(MODULE_NAME, "Start verify Switcher Node"); - - NodeList nodes = configSwitcherElement.getElementsByTagName(switcher); - if (nodes.getLength() == 0) { - configSwitcherElement.appendChild(configSwitcherFile.createElement(switcher)); - getFirstNodeByTagName(switcher, configSwitcherFile).setTextContent(defaultValue); - currentSwitcherState = ConfigSwitcherState.DEFAULT; - } else { - String currentState = getFirstNodeByTagName(switcher, configSwitcherFile).getTextContent(); - try { - currentSwitcherState = ConfigSwitcherState.parse(currentState); - } catch (IllegalArgumentException e) { - currentSwitcherState = ConfigSwitcherState.DEFAULT; - System.out.println("Error while reading current switcher state, using default config"); - LoggingService.logError(MODULE_NAME, "Error while reading current switcher state, using default config", - new ConfigurationItemException(e.getMessage(), e)); - throw new 
ConfigurationItemException(e.getMessage(), e); - } - } - LoggingService.logInfo(MODULE_NAME, "Finished verify Switcher Node"); - } private static void setControllerUrl(String controllerUrl) { LoggingService.logDebug(MODULE_NAME, "Set ControllerUrl"); @@ -1472,7 +1455,7 @@ public static String getConfigReport() { // gps coordinates result.append(buildReportLine(getConfigParamMessage(GPS_COORDINATES), gpsCoordinates)); //fog type - result.append(buildReportLine(getConfigParamMessage(FOG_TYPE), fogType.name().toLowerCase())); + result.append(buildReportLine(getConfigParamMessage(ARCH), arch.name().toLowerCase())); // docker pruning frequency result.append(buildReportLine(getConfigParamMessage(DOCKER_PRUNING_FREQUENCY), format("%d", dockerPruningFrequency))); // available disk threshold @@ -1486,13 +1469,17 @@ public static String getConfigReport() { // result.append(buildReportLine(getConfigParamMessage(CA_CERT), caCert != null ? "configured" : "not configured")); // result.append(buildReportLine(getConfigParamMessage(TLS_CERT), tlsCert != null ? "configured" : "not configured")); // result.append(buildReportLine(getConfigParamMessage(TLS_KEY), tlsKey != null ? "configured" : "not configured")); + // namespace + result.append(buildReportLine(getConfigParamMessage(NAMESPACE), + (namespace != null && !namespace.isEmpty()) ? namespace : "default")); LoggingService.logDebug(MODULE_NAME, "Finished get Config Report"); return result.toString(); } private static String buildReportLine(String messageDescription, String value) { - return rightPad(messageDescription, 40, ' ') + " : " + value + "\\n"; + String safeValue = (value == null) ? 
"" : value; + return rightPad(messageDescription, 40, ' ') + " : " + safeValue + "\\n"; } public static String getNetworkInterfaceInfo() { @@ -1506,24 +1493,16 @@ public static String getNetworkInterfaceInfo() { return networkInterfaceName + "(" + NETWORK_INTERFACE.getDefaultValue() + ")"; } - public static Document getCurrentConfig() { - return configFile; + public static YamlConfig getCurrentConfig() { + return yamlConfig; } public static String getCurrentConfigPath() { - switch (currentSwitcherState) { - case DEVELOPMENT: - return Constants.DEVELOPMENT_CONFIG_PATH; - case PRODUCTION: - return Constants.PRODUCTION_CONFIG_PATH; - case DEFAULT: - default: - return Constants.DEFAULT_CONFIG_PATH; - } + return Constants.CONFIG_YAML_PATH; } public static String getBackUpConfigPath() { - return Constants.BACKUP_CONFIG_PATH; + return Constants.BACKUP_CONFIG_YAML_PATH; } public static String setupConfigSwitcher(ConfigSwitcherState state) { @@ -1546,7 +1525,7 @@ public static void load() { System.out.println(ExceptionUtils.getStackTrace(e)); System.exit(1); } catch (Exception e) { - System.out.println("Error while parsing " + Constants.CONFIG_SWITCHER_PATH); + System.out.println("Error while parsing config.yaml"); System.out.println(e.getMessage()); System.out.println(ExceptionUtils.getStackTrace(e)); System.exit(1); @@ -1570,10 +1549,12 @@ public static void load() { private static String reload(ConfigSwitcherState newState, ConfigSwitcherState previousState) { try { - getFirstNodeByTagName(SWITCHER_NODE, configSwitcherFile).setTextContent(newState.fullValue()); - updateConfigFile(CONFIG_SWITCHER_PATH, configSwitcherFile); + if (yamlConfig == null) { + throw new ConfigurationItemException("Configuration not loaded"); + } + yamlConfig.setCurrentProfile(newState.fullValue()); + updateConfigFile(getCurrentConfigPath()); - Configuration.loadConfigSwitcher(); Configuration.loadConfig(); FieldAgent.getInstance().instanceConfigUpdated(); @@ -1584,8 +1565,10 @@ private static 
String reload(ConfigSwitcherState newState, ConfigSwitcherState p return "Successfully switched to new configuration."; } catch (Exception e) { try { - getFirstNodeByTagName(SWITCHER_NODE, configSwitcherFile).setTextContent(previousState.fullValue()); - updateConfigFile(CONFIG_SWITCHER_PATH, configSwitcherFile); + if (yamlConfig != null) { + yamlConfig.setCurrentProfile(previousState.fullValue()); + updateConfigFile(getCurrentConfigPath()); + } load(); @@ -1680,7 +1663,7 @@ public static String getTimeZone() { public static void setTimeZone(String timeZone) throws ConfigurationItemException { LoggingService.logDebug(MODULE_NAME, "Start set timeZone"); - setNode(TIME_ZONE, timeZone, configFile, configElement); + setNode(TIME_ZONE, timeZone); Configuration.timeZone = timeZone; LoggingService.logDebug(MODULE_NAME, "Finished set timeZone"); @@ -1692,7 +1675,7 @@ public static String getPrivateKey() { public static void setPrivateKey(String privateKey) throws ConfigurationItemException { LoggingService.logDebug(MODULE_NAME, "Start set private key"); - setNode(PRIVATE_KEY, privateKey, configFile, configElement); + setNode(PRIVATE_KEY, privateKey); Configuration.privateKey = privateKey; LoggingService.logDebug(MODULE_NAME, "Finished set private key"); } @@ -1721,6 +1704,19 @@ public static void setTlsKey(String tlsKey) { Configuration.tlsKey = tlsKey; } + public static String getNamespace() { + return namespace; + } + + public static void setNamespace(String namespace) throws ConfigurationItemException { + LoggingService.logDebug(MODULE_NAME, "Start set namespace"); + // Ensure namespace is never null or empty - default to "default" + String safeNamespace = (namespace == null || namespace.isEmpty()) ? "default" : namespace; + setNode(NAMESPACE, safeNamespace); + Configuration.namespace = safeNamespace; + LoggingService.logDebug(MODULE_NAME, "Finished set namespace"); + } + /** * Converts the controller HTTP/HTTPS URL to its WebSocket equivalent (ws/wss). 
* Preserves port numbers and path components. diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/ProfileConfig.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/ProfileConfig.java new file mode 100644 index 0000000..a713c7c --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/ProfileConfig.java @@ -0,0 +1,34 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.utils.configuration; + +import java.util.LinkedHashMap; + +/** + * Represents a single profile's configuration + * Extends LinkedHashMap to store all properties for flexible YAML mapping + */ +public class ProfileConfig extends LinkedHashMap { + + public ProfileConfig() { + super(); + } + + public String getProperty(String key) { + return get(key); + } + + public void setProperty(String key, String value) { + put(key, value != null ? value : ""); + } +} diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/YamlConfig.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/YamlConfig.java new file mode 100644 index 0000000..2c6f3aa --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/utils/configuration/YamlConfig.java @@ -0,0 +1,76 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.utils.configuration; + +import java.util.HashMap; +import java.util.Map; + +/** + * Represents the root YAML configuration structure + */ +public class YamlConfig { + private String currentProfile; + private Map profiles; + + public YamlConfig() { + this.profiles = new HashMap<>(); + } + + public String getCurrentProfile() { + return currentProfile; + } + + public void setCurrentProfile(String currentProfile) { + this.currentProfile = currentProfile; + } + + public Map getProfiles() { + return profiles; + } + + @SuppressWarnings("unchecked") + public void setProfiles(Map profiles) { + if (profiles == null) { + this.profiles = new HashMap<>(); + return; + } + // Convert any Map instances to ProfileConfig + this.profiles = new HashMap<>(); + for (Map.Entry entry : profiles.entrySet()) { + Object profileValue = entry.getValue(); + ProfileConfig profile; + if (profileValue instanceof ProfileConfig) { + profile = (ProfileConfig) profileValue; + } else if (profileValue instanceof Map) { + // Convert Map to ProfileConfig + profile = new ProfileConfig(); + profile.putAll((Map) profileValue); + } else { + // Skip invalid entries + continue; + } + this.profiles.put(entry.getKey(), profile); + } + } + + public ProfileConfig getProfile(String profileName) { + return profiles.get(profileName); + } + + public void setProfile(String profileName, ProfileConfig profile) { + if (profiles == null) { + profiles = new HashMap<>(); + } + profiles.put(profileName, profile); + } +} diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/volume_mount/VolumeMountManager.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/volume_mount/VolumeMountManager.java index d808066..ba8b11f 100644 --- 
a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/volume_mount/VolumeMountManager.java +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/volume_mount/VolumeMountManager.java @@ -22,14 +22,30 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.nio.file.LinkOption; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermissions; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.DosFileAttributeView; +import java.nio.file.attribute.UserPrincipalLookupService; +import java.nio.file.FileSystems; +import java.nio.file.attribute.GroupPrincipal; import java.util.Base64; import java.util.HashMap; import java.util.Map; import java.util.Set; +import java.util.HashSet; +import java.util.List; +import java.util.ArrayList; +import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import java.security.MessageDigest; import java.nio.charset.StandardCharsets; import java.util.Comparator; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import org.apache.commons.lang3.SystemUtils; /** * Manages volume mounts for microservices @@ -40,10 +56,17 @@ public class VolumeMountManager { private static final String MODULE_NAME = "VolumeMountManager"; private static final String VOLUMES_DIR = "volumes"; private static final String INDEX_FILE = "index.json"; + private static final String SECRETS_DIR = "secrets"; + private static final String CONFIGMAPS_DIR = "configMaps"; + private static final String MICROSERVICES_DIR = "microservices"; + private static final int MAX_VERSION_HISTORY = 1; // Keep only current version for simplicity + private static final String DATA_SYMLINK = "..data"; private static VolumeMountManager instance; private final String baseDirectory; private JsonObject indexData; + private final Object indexLock = new Object(); + private final Map 
typeCache = new ConcurrentHashMap<>(); private VolumeMountManager() { this.baseDirectory = Configuration.getDiskDirectory() + VOLUMES_DIR + "/"; @@ -77,6 +100,8 @@ private void init() { // Load or create index file loadIndex(); + // Rebuild type cache after loading index + rebuildTypeCache(); LoggingService.logInfo(MODULE_NAME, "Volume mount manager initialized successfully"); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error initializing volume mount manager", @@ -88,66 +113,227 @@ private void init() { * Loads the index file or creates it if it doesn't exist */ private void loadIndex() { + synchronized (indexLock) { + try { + Path indexFile = Paths.get(baseDirectory + INDEX_FILE); + Path backupFile = Paths.get(baseDirectory + INDEX_FILE + ".bak"); + + if (!Files.exists(indexFile)) { + LoggingService.logDebug(MODULE_NAME, "Creating new index file"); + // Create new index file with empty data + indexData = Json.createObjectBuilder().build(); + saveIndex(); + } else { + LoggingService.logDebug(MODULE_NAME, "Loading existing index file"); + // Load existing index file + try (JsonReader reader = Json.createReader(new FileReader(indexFile.toFile()))) { + JsonObject fileData = reader.readObject(); + String storedChecksum = fileData.getString("checksum"); + JsonObject data = fileData.getJsonObject("data"); + + // Verify checksum + String computedChecksum = checksum(data.toString()); + if (!computedChecksum.equals(storedChecksum)) { + LoggingService.logError(MODULE_NAME, "Index file checksum verification failed", + new AgentSystemException("Index file may have been tampered with")); + // Try to restore from backup + if (Files.exists(backupFile)) { + LoggingService.logInfo(MODULE_NAME, "Attempting to restore index from backup"); + restoreIndexFromBackup(); + } else { + // Initialize empty index if checksum fails and no backup + indexData = Json.createObjectBuilder().build(); + } + return; + } + + indexData = data; + // Migrate old index format if needed + 
migrateIndexFormatIfNeeded(); + } + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error loading index file", + new AgentSystemException(e.getMessage(), e)); + // Try to restore from backup + restoreIndexFromBackup(); + } + } + } + + /** + * Migrates old index format to new format (adds type and microservices fields) + */ + private void migrateIndexFormatIfNeeded() { try { - File indexFile = new File(baseDirectory + INDEX_FILE); - if (!indexFile.exists()) { - LoggingService.logDebug(MODULE_NAME, "Creating new index file"); - // Create new index file with empty data - indexData = Json.createObjectBuilder().build(); - saveIndex(); - } else { - LoggingService.logDebug(MODULE_NAME, "Loading existing index file"); - // Load existing index file - try (JsonReader reader = Json.createReader(new FileReader(indexFile))) { - JsonObject fileData = reader.readObject(); - String storedChecksum = fileData.getString("checksum"); - JsonObject data = fileData.getJsonObject("data"); + boolean needsMigration = false; + JsonObjectBuilder newIndexBuilder = Json.createObjectBuilder(); + + for (String uuid : indexData.keySet()) { + JsonObject mountData = indexData.getJsonObject(uuid); + if (mountData != null) { + JsonObjectBuilder mountBuilder = Json.createObjectBuilder(mountData); + + // Add type field if missing (default to secret for backward compatibility) + if (!mountData.containsKey("type")) { + mountBuilder.add("type", "secret"); + needsMigration = true; + } - // Verify checksum - String computedChecksum = checksum(data.toString()); - if (!computedChecksum.equals(storedChecksum)) { - LoggingService.logError(MODULE_NAME, "Index file checksum verification failed", - new AgentSystemException("Index file may have been tampered with")); - // Initialize empty index if checksum fails - indexData = Json.createObjectBuilder().build(); - return; + // Add microservices array if missing + if (!mountData.containsKey("microservices")) { + mountBuilder.add("microservices", 
Json.createArrayBuilder().build()); + needsMigration = true; } + newIndexBuilder.add(uuid, mountBuilder.build()); + } else { + newIndexBuilder.add(uuid, mountData); + } + } + + if (needsMigration) { + LoggingService.logInfo(MODULE_NAME, "Migrating index format to new schema"); + indexData = newIndexBuilder.build(); + saveIndex(); + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Error migrating index format: " + e.getMessage()); + } + } + + /** + * Restores index from backup file + */ + private void restoreIndexFromBackup() { + try { + Path backupFile = Paths.get(baseDirectory + INDEX_FILE + ".bak"); + if (Files.exists(backupFile)) { + LoggingService.logInfo(MODULE_NAME, "Restoring index from backup"); + try (JsonReader reader = Json.createReader(new FileReader(backupFile.toFile()))) { + JsonObject fileData = reader.readObject(); + JsonObject data = fileData.getJsonObject("data"); indexData = data; + // Save restored index + saveIndex(); } + } else { + // Initialize empty index if no backup available + indexData = Json.createObjectBuilder().build(); } } catch (Exception e) { - LoggingService.logError(MODULE_NAME, "Error loading index file", + LoggingService.logError(MODULE_NAME, "Error restoring index from backup", new AgentSystemException(e.getMessage(), e)); - // Initialize empty index if loading fails + // Initialize empty index if restore fails indexData = Json.createObjectBuilder().build(); } } /** - * Saves the index file + * Saves the index file atomically with backup */ private void saveIndex() { - try { - LoggingService.logDebug(MODULE_NAME, "Saving index file"); - File indexFile = new File(baseDirectory + INDEX_FILE); - - // Create wrapper object with checksum and timestamp - JsonObject wrapper = Json.createObjectBuilder() - .add("checksum", checksum(indexData.toString())) - .add("timestamp", System.currentTimeMillis()) - .add("data", indexData) - .build(); + synchronized (indexLock) { + try { + LoggingService.logDebug(MODULE_NAME, 
"Saving index file"); + Path indexFile = Paths.get(baseDirectory + INDEX_FILE); + Path tempFile = Paths.get(baseDirectory + INDEX_FILE + ".tmp"); + Path backupFile = Paths.get(baseDirectory + INDEX_FILE + ".bak"); + + // Create wrapper object with checksum and timestamp + JsonObject wrapper = Json.createObjectBuilder() + .add("checksum", checksum(indexData.toString())) + .add("timestamp", System.currentTimeMillis()) + .add("data", indexData) + .build(); + + // Write to temp file + try (JsonWriter writer = Json.createWriter(new FileWriter(tempFile.toFile()))) { + writer.writeObject(wrapper); + } + + // Create backup of current index only if it exists and is different + // Backup purpose: Recovery from corruption or failed writes + if (Files.exists(indexFile)) { + // Only create backup if checksum is different (avoid identical files) + try { + try (JsonReader reader = Json.createReader(new FileReader(indexFile.toFile()))) { + JsonObject currentFileData = reader.readObject(); + String currentChecksum = currentFileData.getString("checksum"); + String newChecksum = wrapper.getString("checksum"); + if (!currentChecksum.equals(newChecksum)) { + Files.copy(indexFile, backupFile, StandardCopyOption.REPLACE_EXISTING); + } + } + } catch (Exception e) { + // If we can't compare, create backup anyway for safety + Files.copy(indexFile, backupFile, StandardCopyOption.REPLACE_EXISTING); + } + } - try (JsonWriter writer = Json.createWriter(new FileWriter(indexFile))) { - writer.writeObject(wrapper); + // Atomic move temp to final location + Files.move(tempFile, indexFile, + StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + + // Update volume mount status + StatusReporter.setVolumeMountManagerStatus(indexData.size(), System.currentTimeMillis()); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error saving index file", + new AgentSystemException(e.getMessage(), e)); + // Restore from backup if available + restoreIndexFromBackup(); + } + } + } 
+ + /** + * Rebuilds the type cache from index data + */ + private void rebuildTypeCache() { + synchronized (indexLock) { + typeCache.clear(); + if (indexData != null) { + indexData.forEach((uuid, mountDataValue) -> { + try { + JsonObject mountData = mountDataValue.asJsonObject(); + String name = mountData.getString("name"); + String type = mountData.getString("type", "secret"); + typeCache.put(name, "secret".equals(type) ? VolumeMountType.SECRET : VolumeMountType.CONFIGMAP); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Error rebuilding type cache for UUID: " + uuid); + } + }); } - - // Update volume mount status - StatusReporter.setVolumeMountManagerStatus(indexData.size(), System.currentTimeMillis()); - } catch (Exception e) { - LoggingService.logError(MODULE_NAME, "Error saving index file", - new AgentSystemException(e.getMessage(), e)); + } + } + + /** + * Gets the volume mount type by name (from cache) + * @param volumeName The volume mount name + * @return VolumeMountType or null if not found + */ + public VolumeMountType getVolumeMountType(String volumeName) { + return typeCache.get(volumeName); + } + + /** + * Gets volume mount info by name + * @param volumeName The volume mount name + * @return JsonObject with volume mount info or null if not found + */ + public JsonObject getVolumeMountByName(String volumeName) { + synchronized (indexLock) { + if (indexData == null) { + return null; + } + for (String uuid : indexData.keySet()) { + JsonObject mountData = indexData.getJsonObject(uuid); + if (mountData != null && mountData.getString("name", "").equals(volumeName)) { + return mountData; + } + } + return null; } } @@ -156,45 +342,89 @@ private void saveIndex() { * @param volumeMounts Array of volume mount objects from controller */ public void processVolumeMountChanges(JsonArray volumeMounts) { - try { - LoggingService.logInfo(MODULE_NAME, "Processing volume mount changes"); - // Get existing volume mounts from index - Set existingUuids 
= indexData.keySet(); - - // Get new volume mount UUIDs - Set newUuids = volumeMounts.stream() - .map(JsonValue::asJsonObject) - .map(obj -> obj.getString("uuid")) - .collect(Collectors.toSet()); - - // Handle removed volume mounts - existingUuids.stream() - .filter(uuid -> !newUuids.contains(uuid)) - .forEach(this::deleteVolumeMount); - - // Handle new and updated volume mounts - volumeMounts.forEach(mount -> { - JsonObject volumeMount = mount.asJsonObject(); - String uuid = volumeMount.getString("uuid"); - - if (existingUuids.contains(uuid)) { - LoggingService.logDebug(MODULE_NAME, "Updating volume mount: " + uuid); - updateVolumeMount(volumeMount); - } else { - LoggingService.logDebug(MODULE_NAME, "Creating new volume mount: " + uuid); - createVolumeMount(volumeMount); - } - }); - - // Save updated index (status will be updated by saveIndex) - saveIndex(); - LoggingService.logInfo(MODULE_NAME, "Volume mount changes processed successfully"); - } catch (Exception e) { - LoggingService.logError(MODULE_NAME, "Error processing volume mount changes", - new AgentSystemException(e.getMessage(), e)); + synchronized (indexLock) { + try { + LoggingService.logInfo(MODULE_NAME, "Processing volume mount changes"); + // Get existing volume mounts from index + Set existingUuids = indexData.keySet(); + + // Get new volume mount UUIDs + Set newUuids = volumeMounts.stream() + .map(JsonValue::asJsonObject) + .map(obj -> obj.getString("uuid")) + .collect(Collectors.toSet()); + + // Handle removed volume mounts + existingUuids.stream() + .filter(uuid -> !newUuids.contains(uuid)) + .forEach(this::deleteVolumeMount); + + // Handle new and updated volume mounts + volumeMounts.forEach(mount -> { + JsonObject volumeMount = mount.asJsonObject(); + String uuid = volumeMount.getString("uuid"); + + if (existingUuids.contains(uuid)) { + LoggingService.logDebug(MODULE_NAME, "Updating volume mount: " + uuid); + updateVolumeMount(volumeMount); + } else { + LoggingService.logDebug(MODULE_NAME, 
"Creating new volume mount: " + uuid); + createVolumeMount(volumeMount); + } + }); + + // Rebuild type cache after updates + rebuildTypeCache(); + + // Save updated index (status will be updated by saveIndex) + saveIndex(); + LoggingService.logInfo(MODULE_NAME, "Volume mount changes processed successfully"); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error processing volume mount changes", + new AgentSystemException(e.getMessage(), e)); + } } } + /** + * Gets the directory name for a volume mount type + * @param type VolumeMountType + * @return Directory name (secrets or configMaps) + */ + private String getTypeDirectory(VolumeMountType type) { + return type == VolumeMountType.SECRET ? SECRETS_DIR : CONFIGMAPS_DIR; + } + + /** + * Gets the type prefix for per-microservice directories + * @param type VolumeMountType + * @return Type prefix (datasance.com~secret or datasance.com~configmap) + */ + private String getTypePrefix(VolumeMountType type) { + return type == VolumeMountType.SECRET ? "datasance.com~secret" : "datasance.com~configmap"; + } + + /** + * Parses volume mount type from JSON + * @param volumeMount Volume mount JSON object + * @return VolumeMountType + */ + private VolumeMountType parseVolumeMountType(JsonObject volumeMount) { + String typeStr = volumeMount.getString("type", "secret"); + return "secret".equals(typeStr) ? 
VolumeMountType.SECRET : VolumeMountType.CONFIGMAP; + } + + /** + * Creates a versioned directory name + * @return Versioned directory name (e.g., ..2025_01_01_01_01_10.123456789) + */ + private String createVersionedDirectoryName() { + LocalDateTime now = LocalDateTime.now(); + String timestamp = now.format(DateTimeFormatter.ofPattern("yyyy_MM_dd_HH_mm_ss")); + long nanoseconds = System.nanoTime() % 1_000_000_000; + return String.format("..%s.%09d", timestamp, nanoseconds); + } + /** * Creates a new volume mount * @param volumeMount Volume mount object from controller @@ -205,39 +435,74 @@ private void createVolumeMount(JsonObject volumeMount) { String name = volumeMount.getString("name"); int version = volumeMount.getInt("version"); JsonObject data = volumeMount.getJsonObject("data"); + VolumeMountType type = parseVolumeMountType(volumeMount); - LoggingService.logDebug(MODULE_NAME, String.format("Creating volume mount - UUID: %s, Name: %s, Version: %d", - uuid, name, version)); + LoggingService.logDebug(MODULE_NAME, String.format("Creating volume mount - UUID: %s, Name: %s, Version: %d, Type: %s", + uuid, name, version, type)); - // Create directory for volume mount - Path mountPath = Paths.get(baseDirectory + name); + // Create type-specific directory structure + String typeDir = getTypeDirectory(type); + Path mountPath = Paths.get(baseDirectory, typeDir, name); Files.createDirectories(mountPath); - // Create files and store paths in index + // Create versioned directory + String versionDirName = createVersionedDirectoryName(); + Path versionDir = mountPath.resolve(versionDirName); + Files.createDirectories(versionDir); + + // Create files in versioned directory JsonObjectBuilder dataBuilder = Json.createObjectBuilder(); data.forEach((key, value) -> { try { String decodedContent = decodeBase64(value.toString()); - Path filePath = mountPath.resolve(key); + Path filePath = versionDir.resolve(key); // Create parent directories if they don't exist Path parentDir = 
filePath.getParent(); if (parentDir != null) { Files.createDirectories(parentDir); } Files.write(filePath, decodedContent.getBytes()); - // Store relative path instead of content - dataBuilder.add(key, filePath.toString()); + // Set file permissions based on type + setFilePermissions(filePath, type); + // Store key in index + dataBuilder.add(key, key); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error creating file: " + key, new AgentSystemException(e.getMessage(), e)); } }); - // Update index with paths instead of content + // Create ..data symlink pointing to versioned directory + Path dataLink = mountPath.resolve(DATA_SYMLINK); + if (Files.exists(dataLink)) { + Files.delete(dataLink); + } + Files.createSymbolicLink(dataLink, Paths.get(versionDirName)); + setSymlinkPermissions(dataLink); + + // Create per-key symlinks + data.forEach((key, value) -> { + try { + Path keyLink = mountPath.resolve(key); + if (Files.exists(keyLink)) { + Files.delete(keyLink); + } + Files.createSymbolicLink(keyLink, Paths.get(DATA_SYMLINK + "/" + key)); + setSymlinkPermissions(keyLink); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error creating symlink for key: " + key, + new AgentSystemException(e.getMessage(), e)); + } + }); + + // Update index with new schema + JsonArray microservicesArray = Json.createArrayBuilder().build(); JsonObject mountData = Json.createObjectBuilder() .add("name", name) + .add("type", type == VolumeMountType.SECRET ? 
"secret" : "configMap") .add("version", version) .add("data", dataBuilder) + .add("microservices", microservicesArray) .build(); indexData = Json.createObjectBuilder(indexData) @@ -262,9 +527,10 @@ private void updateVolumeMount(JsonObject volumeMount) { String name = volumeMount.getString("name"); int version = volumeMount.getInt("version"); JsonObject data = volumeMount.getJsonObject("data"); + VolumeMountType type = parseVolumeMountType(volumeMount); - LoggingService.logDebug(MODULE_NAME, String.format("Updating volume mount - UUID: %s, Name: %s, Version: %d", - uuid, name, version)); + LoggingService.logDebug(MODULE_NAME, String.format("Updating volume mount - UUID: %s, Name: %s, Version: %d, Type: %s", + uuid, name, version, type)); // Get current version from index JsonObject currentMount = indexData.getJsonObject(uuid); @@ -278,35 +544,111 @@ private void updateVolumeMount(JsonObject volumeMount) { } } - // Create or update directory for volume mount - Path mountPath = Paths.get(baseDirectory + name); + // Get old keys for deletion + Set oldKeys = new HashSet<>(); + if (currentMount != null && currentMount.containsKey("data")) { + JsonObject oldData = currentMount.getJsonObject("data"); + oldKeys.addAll(oldData.keySet()); + } + Set newKeys = new HashSet<>(data.keySet()); + Set deletedKeys = new HashSet<>(oldKeys); + deletedKeys.removeAll(newKeys); + + // Create type-specific directory structure + String typeDir = getTypeDirectory(type); + Path mountPath = Paths.get(baseDirectory, typeDir, name); Files.createDirectories(mountPath); - // Update files and store paths in index + // Create new versioned directory + String versionDirName = createVersionedDirectoryName(); + Path versionDir = mountPath.resolve(versionDirName); + Files.createDirectories(versionDir); + + // Create files in new versioned directory JsonObjectBuilder dataBuilder = Json.createObjectBuilder(); data.forEach((key, value) -> { try { String decodedContent = decodeBase64(value.toString()); - 
Path filePath = mountPath.resolve(key); + Path filePath = versionDir.resolve(key); // Create parent directories if they don't exist Path parentDir = filePath.getParent(); if (parentDir != null) { Files.createDirectories(parentDir); } Files.write(filePath, decodedContent.getBytes()); - // Store relative path instead of content - dataBuilder.add(key, filePath.toString()); + // Set file permissions based on type + setFilePermissions(filePath, type); + // Store key in index + dataBuilder.add(key, key); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error updating file: " + key, new AgentSystemException(e.getMessage(), e)); } }); - // Update index with paths instead of content + // Atomically swap ..data symlink to point to new version + Path dataLink = mountPath.resolve(DATA_SYMLINK); + Path newDataLink = mountPath.resolve(DATA_SYMLINK + ".tmp"); + if (Files.exists(newDataLink)) { + Files.delete(newDataLink); + } + Files.createSymbolicLink(newDataLink, Paths.get(versionDirName)); + setSymlinkPermissions(newDataLink); + Files.move(newDataLink, dataLink, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); + + // Update per-key symlinks (create new, update existing) + data.forEach((key, value) -> { + try { + Path keyLink = mountPath.resolve(key); + if (Files.exists(keyLink)) { + Files.delete(keyLink); + } + Files.createSymbolicLink(keyLink, Paths.get(DATA_SYMLINK + "/" + key)); + setSymlinkPermissions(keyLink); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error updating symlink for key: " + key, + new AgentSystemException(e.getMessage(), e)); + } + }); + + // Delete symlinks for removed keys + deletedKeys.forEach(key -> { + try { + Path keyLink = mountPath.resolve(key); + if (Files.exists(keyLink)) { + // Check if it's actually a symlink before deleting + if (Files.isSymbolicLink(keyLink)) { + Files.delete(keyLink); + LoggingService.logDebug(MODULE_NAME, "Deleted symlink for removed key: " + key); + } else { + // If 
it's not a symlink, still delete it (shouldn't happen, but be safe) + Files.delete(keyLink); + LoggingService.logWarning(MODULE_NAME, "Deleted non-symlink file for removed key: " + key); + } + } + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error deleting symlink for removed key: " + key, + new AgentSystemException(e.getMessage(), e)); + // Don't just log warning - this is important, so log as error + } + }); + + // Clean up old version directories (keep last MAX_VERSION_HISTORY) + cleanupOldVersions(mountPath); + + // Get existing microservices list + JsonArray microservicesArray = Json.createArrayBuilder().build(); + if (currentMount != null && currentMount.containsKey("microservices")) { + microservicesArray = currentMount.getJsonArray("microservices"); + } + + // Update index with new schema JsonObject mountData = Json.createObjectBuilder() .add("name", name) + .add("type", type == VolumeMountType.SECRET ? "secret" : "configMap") .add("version", version) .add("data", dataBuilder) + .add("microservices", microservicesArray) .build(); indexData = Json.createObjectBuilder(indexData) @@ -314,6 +656,10 @@ private void updateVolumeMount(JsonObject volumeMount) { .build(); saveIndex(); + + // Sync symlinks in per-microservice directories to reflect the update + syncMicroserviceSymlinks(name, type); + LoggingService.logDebug(MODULE_NAME, "Volume mount updated successfully: " + uuid); } catch (Exception e) { LoggingService.logError(MODULE_NAME, "Error updating volume mount", @@ -321,6 +667,106 @@ private void updateVolumeMount(JsonObject volumeMount) { } } + /** + * Cleans up old version directories, keeping only the last MAX_VERSION_HISTORY versions + * @param mountPath The mount path containing versioned directories + */ + private void cleanupOldVersions(Path mountPath) { + try { + List versionDirs = new ArrayList<>(); + Files.list(mountPath).forEach(path -> { + String fileName = path.getFileName().toString(); + if (fileName.startsWith("..") && 
fileName.matches("\\.\\.[0-9_]+\\.[0-9]+")) { + versionDirs.add(path); + } + }); + + // Sort by modification time (oldest first) + versionDirs.sort((p1, p2) -> { + try { + return Long.compare(Files.getLastModifiedTime(p1).toMillis(), + Files.getLastModifiedTime(p2).toMillis()); + } catch (IOException e) { + return 0; + } + }); + + // Delete old versions, keeping last MAX_VERSION_HISTORY + int toDelete = versionDirs.size() - MAX_VERSION_HISTORY; + for (int i = 0; i < toDelete && i < versionDirs.size(); i++) { + try { + Files.walk(versionDirs.get(i)) + .sorted(Comparator.reverseOrder()) + .forEach(path -> { + try { + Files.delete(path); + } catch (IOException e) { + LoggingService.logWarning(MODULE_NAME, "Error deleting old version: " + path); + } + }); + } catch (IOException e) { + LoggingService.logWarning(MODULE_NAME, "Error cleaning up old version: " + versionDirs.get(i)); + } + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Error during version cleanup: " + e.getMessage()); + } + } + + /** + * Sets file permissions based on volume mount type + * @param path File path + * @param type VolumeMountType + */ + private void setFilePermissions(Path path, VolumeMountType type) { + try { + if (SystemUtils.IS_OS_LINUX || SystemUtils.IS_OS_MAC) { + Set perms; + if (type == VolumeMountType.SECRET) { + // Secrets: 600 (rw-------) + perms = PosixFilePermissions.fromString("rw-------"); + } else { + // ConfigMaps: 644 (rw-r--r--) + perms = PosixFilePermissions.fromString("rw-r--r--"); + } + Files.setPosixFilePermissions(path, perms); + + // Try to set ownership to root:root (or current user if not root) + try { + UserPrincipalLookupService lookupService = FileSystems.getDefault().getUserPrincipalLookupService(); + GroupPrincipal group = lookupService.lookupPrincipalByGroupName("root"); + PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, + PosixFileAttributeView.class, LinkOption.NOFOLLOW_LINKS); + 
fileAttributeView.setGroup(group); + } catch (Exception e) { + // Ignore if we can't set ownership (e.g., not running as root) + LoggingService.logDebug(MODULE_NAME, "Could not set file ownership: " + e.getMessage()); + } + } else if (SystemUtils.IS_OS_WINDOWS) { + DosFileAttributeView fileAttributeView = Files.getFileAttributeView(path, + DosFileAttributeView.class, LinkOption.NOFOLLOW_LINKS); + fileAttributeView.setReadOnly(type == VolumeMountType.SECRET); + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Error setting file permissions: " + e.getMessage()); + } + } + + /** + * Sets symlink permissions to 777 (traversal permissions) + * @param symlink Symlink path + */ + private void setSymlinkPermissions(Path symlink) { + try { + if (SystemUtils.IS_OS_LINUX || SystemUtils.IS_OS_MAC) { + Set perms = PosixFilePermissions.fromString("rwxrwxrwx"); + Files.setPosixFilePermissions(symlink, perms); + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Error setting symlink permissions: " + e.getMessage()); + } + } + /** * Deletes a volume mount * @param uuid UUID of the volume mount to delete @@ -338,7 +784,9 @@ private void deleteVolumeMount(String uuid) { // Delete mount directory and files String name = mountData.getString("name"); - Path mountPath = Paths.get(baseDirectory + name); + String typeStr = mountData.getString("type", "secret"); + String typeDir = "secret".equals(typeStr) ? 
SECRETS_DIR : CONFIGMAPS_DIR; + Path mountPath = Paths.get(baseDirectory, typeDir, name); if (Files.exists(mountPath)) { Files.walk(mountPath) .sorted(Comparator.reverseOrder()) @@ -361,6 +809,9 @@ private void deleteVolumeMount(String uuid) { }); indexData = newIndexBuilder.build(); + // Remove from type cache + typeCache.remove(name); + saveIndex(); LoggingService.logDebug(MODULE_NAME, "Volume mount deleted successfully: " + uuid); } catch (Exception e) { @@ -369,6 +820,348 @@ private void deleteVolumeMount(String uuid) { } } + /** + * Prepares per-microservice volume mount directory with symlinks + * Fast path: returns immediately if directory exists + * @param microserviceUuid Microservice UUID + * @param volumeName Volume mount name + * @param type VolumeMountType + * @return Path to per-microservice mount directory + */ + public String prepareMicroserviceVolumeMount(String microserviceUuid, String volumeName, VolumeMountType type) { + try { + Path mountPath = getMountPath(microserviceUuid, volumeName, type); + + // Fast path: if exists and has symlinks, skip (zero overhead) + if (Files.exists(mountPath) && hasSymlinks(mountPath)) { + return mountPath.toString(); + } + + // Slow path: create directory and copy files (only on first creation) + try { + // Atomic directory creation (handles race conditions) + Files.createDirectories(mountPath); + + // Calculate source path + String typeDir = getTypeDirectory(type); + Path sourcePath = Paths.get(baseDirectory, typeDir, volumeName); + Path sourceDataLink = sourcePath.resolve(DATA_SYMLINK); + + // Resolve the actual ..data symlink to get the real versioned directory + Path sourceVersionedDir; + if (Files.exists(sourceDataLink)) { + sourceVersionedDir = sourceDataLink.toRealPath(); + } else { + LoggingService.logWarning(MODULE_NAME, "Source ..data symlink does not exist for: " + volumeName); + return mountPath.toString(); // Return path anyway + } + + // Get the versioned directory name (e.g., 
..2025_12_30_15_00_00.123456789) + String versionedDirName = sourceVersionedDir.getFileName().toString(); + + // Copy the versioned directory to per-microservice directory + Path targetVersionedDir = mountPath.resolve(versionedDirName); + if (!Files.exists(targetVersionedDir)) { + Files.createDirectories(targetVersionedDir); + + // Copy all files from source versioned directory to target + Files.list(sourceVersionedDir).forEach(sourceFile -> { + if (Files.isRegularFile(sourceFile)) { + try { + Path targetFile = targetVersionedDir.resolve(sourceFile.getFileName()); + Files.copy(sourceFile, targetFile, StandardCopyOption.REPLACE_EXISTING); + // Preserve file permissions + setFilePermissions(targetFile, type); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error copying file: " + sourceFile.getFileName() + " - " + e.getMessage()); + } + } + }); + } + + // Create ..data symlink pointing to versioned directory (relative path) + Path dataLink = mountPath.resolve(DATA_SYMLINK); + if (!Files.exists(dataLink)) { + // Use relative path for ..data symlink (works in containers) + Files.createSymbolicLink(dataLink, Paths.get(versionedDirName)); + setSymlinkPermissions(dataLink); + } + + // Create key symlinks pointing to ..data/key (relative paths) + Files.list(targetVersionedDir).forEach(keyPath -> { + if (Files.isRegularFile(keyPath)) { + String key = keyPath.getFileName().toString(); + try { + Path keyLink = mountPath.resolve(key); + if (!Files.exists(keyLink)) { + // Create relative symlink: key -> ..data/key + Files.createSymbolicLink(keyLink, Paths.get(DATA_SYMLINK + "/" + key)); + setSymlinkPermissions(keyLink); + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error creating key symlink: " + key + " - " + e.getMessage()); + } + } + }); + + // Track microservice usage in index + trackMicroserviceUsage(volumeName, microserviceUuid, true); + + } catch (Exception e) { + // Log error but don't fail container creation + 
LoggingService.logWarning(MODULE_NAME, + "Error creating volume mount structure: " + e.getMessage()); + // Return path anyway - background thread will fix + } + + return mountPath.toString(); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error preparing microservice volume mount", + new AgentSystemException(e.getMessage(), e)); + // Return path anyway to avoid blocking container creation + return getMountPath(microserviceUuid, volumeName, type).toString(); + } + } + + /** + * Gets the mount path for a microservice volume mount + * @param microserviceUuid Microservice UUID + * @param volumeName Volume mount name + * @param type VolumeMountType + * @return Path to per-microservice mount directory + */ + private Path getMountPath(String microserviceUuid, String volumeName, VolumeMountType type) { + String typePrefix = getTypePrefix(type); + return Paths.get(baseDirectory, MICROSERVICES_DIR, microserviceUuid, "volumes", typePrefix, volumeName); + } + + /** + * Syncs per-microservice symlinks when source volume mount is updated + * @param volumeName Volume mount name + * @param type VolumeMountType + */ + private void syncMicroserviceSymlinks(String volumeName, VolumeMountType type) { + synchronized (indexLock) { + try { + JsonObject volumeMountData = getVolumeMountByName(volumeName); + if (volumeMountData == null) { + return; + } + + JsonArray microservicesArray = volumeMountData.getJsonArray("microservices"); + if (microservicesArray == null) { + return; + } + + // Get the source versioned directory + String typeDir = getTypeDirectory(type); + Path sourcePath = Paths.get(baseDirectory, typeDir, volumeName); + Path sourceDataLink = sourcePath.resolve(DATA_SYMLINK); + + if (!Files.exists(sourceDataLink)) { + return; + } + + Path sourceVersionedDir = sourceDataLink.toRealPath(); + String versionedDirName = sourceVersionedDir.getFileName().toString(); + + // Update symlinks for each microservice using this volume mount + for (int i = 0; i < 
microservicesArray.size(); i++) { + String microserviceUuid = microservicesArray.getString(i); + Path mountPath = getMountPath(microserviceUuid, volumeName, type); + + if (!Files.exists(mountPath)) { + continue; + } + + // Copy new versioned directory if it doesn't exist + Path targetVersionedDir = mountPath.resolve(versionedDirName); + if (!Files.exists(targetVersionedDir)) { + Files.createDirectories(targetVersionedDir); + + // Copy all files from source versioned directory to target + Files.list(sourceVersionedDir).forEach(sourceFile -> { + if (Files.isRegularFile(sourceFile)) { + try { + Path targetFile = targetVersionedDir.resolve(sourceFile.getFileName()); + Files.copy(sourceFile, targetFile, StandardCopyOption.REPLACE_EXISTING); + // Preserve file permissions + setFilePermissions(targetFile, type); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error copying file: " + sourceFile.getFileName() + " - " + e.getMessage()); + } + } + }); + } + + // Atomically update ..data symlink to point to new version + Path dataLink = mountPath.resolve(DATA_SYMLINK); + Path newDataLink = mountPath.resolve(DATA_SYMLINK + ".tmp"); + if (Files.exists(newDataLink)) { + Files.delete(newDataLink); + } + Files.createSymbolicLink(newDataLink, Paths.get(versionedDirName)); + setSymlinkPermissions(newDataLink); + Files.move(newDataLink, dataLink, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); + + // Update/create key symlinks (relative paths) + Files.list(targetVersionedDir).forEach(keyPath -> { + if (Files.isRegularFile(keyPath)) { + String key = keyPath.getFileName().toString(); + try { + Path keyLink = mountPath.resolve(key); + if (Files.exists(keyLink)) { + Files.delete(keyLink); + } + // Create relative symlink: key -> ..data/key + Files.createSymbolicLink(keyLink, Paths.get(DATA_SYMLINK + "/" + key)); + setSymlinkPermissions(keyLink); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error syncing key symlink: " + 
key + " for microservice: " + microserviceUuid); + } + } + }); + + // Remove symlinks for keys that no longer exist + Files.list(mountPath).forEach(path -> { + String fileName = path.getFileName().toString(); + if (!fileName.equals(DATA_SYMLINK) && !fileName.startsWith("..") && Files.isSymbolicLink(path)) { + if (!Files.exists(targetVersionedDir.resolve(fileName))) { + try { + Files.delete(path); + LoggingService.logDebug(MODULE_NAME, + "Removed obsolete symlink: " + fileName + " for microservice: " + microserviceUuid); + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error removing obsolete symlink: " + fileName); + } + } + } + }); + + // Clean up old versioned directories in per-microservice directory + cleanupOldVersions(mountPath); + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, "Error syncing microservice symlinks: " + e.getMessage()); + } + } + } + + /** + * Checks if mount path has symlinks (fast path check) + * @param mountPath Mount path to check + * @return true if ..data symlink exists + */ + private boolean hasSymlinks(Path mountPath) { + return Files.exists(mountPath.resolve(DATA_SYMLINK)); + } + + /** + * Tracks microservice usage of volume mounts in index + * @param volumeName Volume mount name + * @param microserviceUuid Microservice UUID + * @param add true to add, false to remove + */ + private void trackMicroserviceUsage(String volumeName, String microserviceUuid, boolean add) { + synchronized (indexLock) { + try { + // Find volume mount by name + for (String uuid : indexData.keySet()) { + JsonObject mountData = indexData.getJsonObject(uuid); + if (mountData != null && mountData.getString("name", "").equals(volumeName)) { + JsonArray microservicesArray = mountData.getJsonArray("microservices"); + List microservices = new ArrayList<>(); + + // Convert to list + if (microservicesArray != null) { + for (int i = 0; i < microservicesArray.size(); i++) { + microservices.add(microservicesArray.getString(i)); + 
} + } + + if (add && !microservices.contains(microserviceUuid)) { + microservices.add(microserviceUuid); + } else if (!add) { + microservices.remove(microserviceUuid); + } + + // Rebuild mount data with updated microservices list + JsonArrayBuilder arrayBuilder = Json.createArrayBuilder(); + microservices.forEach(arrayBuilder::add); + + JsonObjectBuilder mountBuilder = Json.createObjectBuilder(mountData); + mountBuilder.remove("microservices"); + mountBuilder.add("microservices", arrayBuilder.build()); + + JsonObjectBuilder indexBuilder = Json.createObjectBuilder(indexData); + indexBuilder.remove(uuid); + indexBuilder.add(uuid, mountBuilder.build()); + indexData = indexBuilder.build(); + + break; + } + } + } catch (Exception e) { + LoggingService.logWarning(MODULE_NAME, + "Error tracking microservice usage: " + e.getMessage()); + } + } + } + + /** + * Cleans up per-microservice volume mount directories + * @param microserviceUuid Microservice UUID + */ + public void cleanupMicroserviceVolumes(String microserviceUuid) { + try { + LoggingService.logDebug(MODULE_NAME, "Cleaning up microservice volumes: " + microserviceUuid); + + Path microservicePath = Paths.get(baseDirectory, MICROSERVICES_DIR, microserviceUuid); + if (Files.exists(microservicePath)) { + // Find all volume mounts used by this microservice from index + synchronized (indexLock) { + for (String uuid : indexData.keySet()) { + JsonObject mountData = indexData.getJsonObject(uuid); + if (mountData != null && mountData.containsKey("microservices")) { + JsonArray microservicesArray = mountData.getJsonArray("microservices"); + if (microservicesArray != null) { + for (int i = 0; i < microservicesArray.size(); i++) { + if (microserviceUuid.equals(microservicesArray.getString(i))) { + String volumeName = mountData.getString("name"); + // Remove from tracking + trackMicroserviceUsage(volumeName, microserviceUuid, false); + break; + } + } + } + } + } + } + + // Delete per-microservice mount directory + 
Files.walk(microservicePath) + .sorted(Comparator.reverseOrder()) + .forEach(path -> { + try { + Files.delete(path); + } catch (IOException e) { + LoggingService.logWarning(MODULE_NAME, "Error deleting microservice volume: " + path); + } + }); + } + + LoggingService.logDebug(MODULE_NAME, "Microservice volumes cleaned up: " + microserviceUuid); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error cleaning up microservice volumes", + new AgentSystemException(e.getMessage(), e)); + } + } + /** * Decodes a base64 encoded string * @param encoded Base64 encoded string @@ -411,36 +1204,53 @@ private String checksum(String data) { * Used during deprovisioning */ public void clear() { - try { - LoggingService.logDebug(MODULE_NAME, "Start clearing volume mounts"); - - // Delete all volume mount directories - File volumesDir = new File(baseDirectory); - if (volumesDir.exists()) { - File[] volumeDirs = volumesDir.listFiles(File::isDirectory); - if (volumeDirs != null) { - for (File dir : volumeDirs) { - deleteDirectory(dir); - } + synchronized (indexLock) { + try { + LoggingService.logDebug(MODULE_NAME, "Start clearing volume mounts"); + + // Delete all volume mount directories (secrets, configMaps, microservices) + Path volumesPath = Paths.get(baseDirectory); + if (Files.exists(volumesPath)) { + Files.walk(volumesPath) + .sorted(Comparator.reverseOrder()) + .forEach(path -> { + try { + // Don't delete the base directory itself + if (!path.equals(volumesPath)) { + Files.delete(path); + } + } catch (IOException e) { + LoggingService.logWarning(MODULE_NAME, "Error deleting: " + path); + } + }); } + + // Delete index file and backups + Path indexFile = Paths.get(baseDirectory + INDEX_FILE); + if (Files.exists(indexFile)) { + Files.delete(indexFile); + } + Path backupFile = Paths.get(baseDirectory + INDEX_FILE + ".bak"); + if (Files.exists(backupFile)) { + Files.delete(backupFile); + } + Path tempFile = Paths.get(baseDirectory + INDEX_FILE + ".tmp"); + if 
(Files.exists(tempFile)) { + Files.delete(tempFile); + } + + // Clear index data and cache + indexData = Json.createObjectBuilder().build(); + typeCache.clear(); + + // Update status reporter + StatusReporter.setVolumeMountManagerStatus(0, System.currentTimeMillis()); + + LoggingService.logDebug(MODULE_NAME, "Finished clearing volume mounts"); + } catch (Exception e) { + LoggingService.logError(MODULE_NAME, "Error clearing volume mounts", + new AgentSystemException(e.getMessage(), e)); } - - // Delete index file - File indexFile = new File(baseDirectory + INDEX_FILE); - if (indexFile.exists()) { - indexFile.delete(); - } - - // Clear index data - indexData = Json.createObjectBuilder().build(); - - // Update status reporter - StatusReporter.setVolumeMountManagerStatus(0, System.currentTimeMillis()); - - LoggingService.logDebug(MODULE_NAME, "Finished clearing volume mounts"); - } catch (Exception e) { - LoggingService.logError(MODULE_NAME, "Error clearing volume mounts", - new AgentSystemException(e.getMessage(), e)); } } diff --git a/iofog-agent-daemon/src/main/java/org/eclipse/iofog/volume_mount/VolumeMountType.java b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/volume_mount/VolumeMountType.java new file mode 100644 index 0000000..e5a5475 --- /dev/null +++ b/iofog-agent-daemon/src/main/java/org/eclipse/iofog/volume_mount/VolumeMountType.java @@ -0,0 +1,22 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +package org.eclipse.iofog.volume_mount; + +/** + * Enum representing the type of volume mount + */ +public enum VolumeMountType { + SECRET, + CONFIGMAP +} + diff --git a/iofog-agent-daemon/src/main/resources/cmd_messages.properties b/iofog-agent-daemon/src/main/resources/cmd_messages.properties index aa02b1d..112fc54 100644 --- a/iofog-agent-daemon/src/main/resources/cmd_messages.properties +++ b/iofog-agent-daemon/src/main/resources/cmd_messages.properties @@ -28,10 +28,11 @@ iofog_uuid=Iofog UUID ip_address=IP Address gps_mode=GPS mode gps_coordinates=GPS coordinates(lat,lon) -fog_type=Fog type +arch=Architecture dev_mode=Developer's Mode available_disk_threshold=Available Disk Threshold docker_pruning_frequency=Docker Pruning Frequency ready_to_upgrade_scan_frequency=Ready To Upgrade Scan Frequency secure_mode=Secure Mode -time_zone=Time Zone \ No newline at end of file +time_zone=Time Zone +namespace=Namespace \ No newline at end of file diff --git a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineActionTest.java b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineActionTest.java index 7cac2df..4a01e6a 100644 --- a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineActionTest.java +++ b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineActionTest.java @@ -87,13 +87,13 @@ public void setup() { configurationMockedStatic.when(Configuration::getConfigReport) .thenReturn("Config report"); - configurationMockedStatic.when(() -> Configuration.getOldNodeValuesForParameters(anySet(), any())) + configurationMockedStatic.when(() -> Configuration.getOldNodeValuesForParameters(anySet())) .thenReturn(result); configurationMockedStatic.when(() -> 
Configuration.setConfig(anyMap(),anyBoolean())) .thenReturn(new HashMap<>()) .thenThrow(new Exception("item not found or defined more than once")); - Mockito.when(CmdProperties.getVersion()).thenReturn("3.5.6"); + Mockito.when(CmdProperties.getVersion()).thenReturn("3.6.0"); Mockito.when(CmdProperties.getVersionMessage()).thenReturn(version); Mockito.when(CmdProperties.getDeprovisionMessage()).thenReturn("Deprovisioning from controller ... %s"); Mockito.when(CmdProperties.getProvisionMessage()).thenReturn("Provisioning with key \"%s\" ... Result: %s"); @@ -333,7 +333,7 @@ public void throwsExceptionsWhenConfigActionPerformWithValidOptionAndInvalidValu String[] args = {"config", "-ll", "severe"}; // Mockito.when(Configuration.setConfig(anyMap(), anyBoolean())).thenReturn(new HashMap<>()); -// Mockito.when(Configuration.getOldNodeValuesForParameters(anySet(), any())). +// Mockito.when(Configuration.getOldNodeValuesForParameters(anySet())). // thenReturn(result); Assertions.assertEquals("\\n\tChange accepted for Parameter : - ll, Old value was :info, New Value is : severe", CommandLineAction.getActionByKey(args[0]).perform(args)); @@ -364,7 +364,7 @@ private static boolean isEqual(List list1, List list2) { "0.00 MB\\nSystem Available Memory : " + "0.00 MB\\nSystem Total CPU : 0.00 %"; - private String version = "ioFog Agent 3.5.6 \n" + + private String version = "ioFog Agent 3.6.0 \n" + "Copyright (c) 2023 Datasance Teknoloji A.S. 
\n" + "Eclipse ioFog is provided under the Eclipse Public License 2.0 (EPL-2.0) \n" + "https://www.eclipse.org/legal/epl-v20.html"; diff --git a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineConfigParamTest.java b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineConfigParamTest.java index 2b2a961..6f8b7b6 100644 --- a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineConfigParamTest.java +++ b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/command_line/CommandLineConfigParamTest.java @@ -62,7 +62,7 @@ public void testGetCommandName() { assertEquals("gps", commandLineConfigParam.GPS_MODE.getCommandName()); assertEquals("", commandLineConfigParam.GPS_COORDINATES.getCommandName()); assertEquals("df", commandLineConfigParam.POST_DIAGNOSTICS_FREQ.getCommandName()); - assertEquals("ft", commandLineConfigParam.FOG_TYPE.getCommandName()); + assertEquals("ft", commandLineConfigParam.ARCH.getCommandName()); assertEquals("dev", commandLineConfigParam.DEV_MODE.getCommandName()); assertEquals("pf", commandLineConfigParam.DOCKER_PRUNING_FREQUENCY.getCommandName()); assertEquals("dt", commandLineConfigParam.AVAILABLE_DISK_THRESHOLD.getCommandName()); @@ -92,7 +92,7 @@ public void testGetXmlTag() { assertEquals("gps", commandLineConfigParam.GPS_MODE.getXmlTag()); assertEquals("gps_coordinates", commandLineConfigParam.GPS_COORDINATES.getXmlTag()); assertEquals("post_diagnostics_freq", commandLineConfigParam.POST_DIAGNOSTICS_FREQ.getXmlTag()); - assertEquals("fog_type", commandLineConfigParam.FOG_TYPE.getXmlTag()); + assertEquals("arch", commandLineConfigParam.ARCH.getXmlTag()); assertEquals("dev_mode", commandLineConfigParam.DEV_MODE.getXmlTag()); assertEquals("docker_pruning_freq", commandLineConfigParam.DOCKER_PRUNING_FREQUENCY.getXmlTag()); assertEquals("available_disk_threshold", commandLineConfigParam.AVAILABLE_DISK_THRESHOLD.getXmlTag()); @@ -125,7 +125,7 @@ public void testGetJsonProperty() { 
assertEquals("edgeGuardFrequency", commandLineConfigParam.EDGE_GUARD_FREQUENCY.getJsonProperty()); assertEquals("gpscoordinates", commandLineConfigParam.GPS_COORDINATES.getJsonProperty()); assertEquals("postdiagnosticsfreq", commandLineConfigParam.POST_DIAGNOSTICS_FREQ.getJsonProperty()); - assertEquals("", commandLineConfigParam.FOG_TYPE.getJsonProperty()); + assertEquals("", commandLineConfigParam.ARCH.getJsonProperty()); assertEquals("", commandLineConfigParam.DEV_MODE.getJsonProperty()); assertEquals("dockerPruningFrequency", commandLineConfigParam.DOCKER_PRUNING_FREQUENCY.getJsonProperty()); assertEquals("availableDiskThreshold", commandLineConfigParam.AVAILABLE_DISK_THRESHOLD.getJsonProperty()); @@ -158,7 +158,7 @@ public void testGetDefaultValue() { assertEquals("0", commandLineConfigParam.EDGE_GUARD_FREQUENCY.getDefaultValue()); assertEquals("", commandLineConfigParam.GPS_COORDINATES.getDefaultValue()); assertEquals("10", commandLineConfigParam.POST_DIAGNOSTICS_FREQ.getDefaultValue()); - assertEquals("auto", commandLineConfigParam.FOG_TYPE.getDefaultValue()); + assertEquals("auto", commandLineConfigParam.ARCH.getDefaultValue()); assertEquals("off", commandLineConfigParam.SECURE_MODE.getDefaultValue()); assertEquals("1", commandLineConfigParam.DOCKER_PRUNING_FREQUENCY.getDefaultValue()); assertEquals("20", commandLineConfigParam.AVAILABLE_DISK_THRESHOLD.getDefaultValue()); @@ -192,7 +192,7 @@ public void testGetCmdText() { assertEquals("-egf", commandLineConfigParam.EDGE_GUARD_FREQUENCY.getCmdText()); assertEquals("-", commandLineConfigParam.GPS_COORDINATES.getCmdText()); assertEquals("-df", commandLineConfigParam.POST_DIAGNOSTICS_FREQ.getCmdText()); - assertEquals("-ft", commandLineConfigParam.FOG_TYPE.getCmdText()); + assertEquals("-ft", commandLineConfigParam.ARCH.getCmdText()); assertEquals("-dev", commandLineConfigParam.DEV_MODE.getCmdText()); assertEquals("-pf", commandLineConfigParam.DOCKER_PRUNING_FREQUENCY.getCmdText()); assertEquals("-dt", 
commandLineConfigParam.AVAILABLE_DISK_THRESHOLD.getCmdText()); diff --git a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/CmdPropertiesTest.java b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/CmdPropertiesTest.java index 0a0c7d2..edc3fca 100644 --- a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/CmdPropertiesTest.java +++ b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/CmdPropertiesTest.java @@ -46,7 +46,7 @@ public void tearDown() throws Exception { //@Test //public void getVersionMessage() { - // assertEquals("ioFog Agent 3.5.6 \nCopyright (c) 2023 Datasance Teknoloji A.S. \nEclipse ioFog is provided under the Eclipse Public License 2.0 (EPL-2.0) \nhttps://www.eclipse.org/legal/epl-v20.html", + // assertEquals("ioFog Agent 3.6.0 \nCopyright (c) 2023 Datasance Teknoloji A.S. \nEclipse ioFog is provided under the Eclipse Public License 2.0 (EPL-2.0) \nhttps://www.eclipse.org/legal/epl-v20.html", // CmdProperties.getVersionMessage()); //} @@ -177,11 +177,11 @@ public void testGetConfigParamMessageOfGPSMode() { } /** - * Test getConfigParamMessage FOG_TYPE + * Test getConfigParamMessage ARCH */ @Test - public void testGetConfigParamMessageOfFogType() { - assertEquals("Fog type", CmdProperties.getConfigParamMessage(CommandLineConfigParam.FOG_TYPE)); + public void testGetConfigParamMessageOfArch() { + assertEquals("Architecture", CmdProperties.getConfigParamMessage(CommandLineConfigParam.ARCH)); } /** * Test getConfigParamMessage DEV_MODE diff --git a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/OrchestratorTest.java b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/OrchestratorTest.java index d3e7833..48fe41a 100644 --- a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/OrchestratorTest.java +++ b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/OrchestratorTest.java @@ -178,7 +178,7 @@ public void setUp() throws Exception { 
Mockito.when(MultipartEntityBuilder.create()).thenReturn(multipartEntityBuilder); Mockito.when(multipartEntityBuilder.build()).thenReturn(httpEntity); Mockito.when(Configuration.getIofogUuid()).thenReturn("iofog-uuid"); - Mockito.when(Configuration.getFogType()).thenReturn(ArchitectureType.ARM); + Mockito.when(Configuration.getArch()).thenReturn(ArchitectureType.ARM); // Mockito.when(Configuration.getAccessToken()).thenReturn("access-token"); Mockito.when(Configuration.getPrivateKey()).thenReturn("privateKey"); Mockito.when(Configuration.getControllerUrl()).thenReturn("http://controller/"); diff --git a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/configuration/ConfigurationTest.java b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/configuration/ConfigurationTest.java index 9e0ab68..3f8f5a1 100644 --- a/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/configuration/ConfigurationTest.java +++ b/iofog-agent-daemon/src/test/java/org/eclipse/iofog/utils/configuration/ConfigurationTest.java @@ -195,8 +195,8 @@ public void testGettersAndSetters() { assertEquals(10, Configuration.getPostDiagnosticsFreq()); Configuration.setPostDiagnosticsFreq(60); assertEquals(60, Configuration.getPostDiagnosticsFreq()); - Configuration.setFogType(ArchitectureType.ARM); - assertEquals(ArchitectureType.ARM, Configuration.getFogType()); + Configuration.setArch(ArchitectureType.ARM); + assertEquals(ArchitectureType.ARM, Configuration.getArch()); assertEquals(false, Configuration.isSecureMode()); Configuration.setSecureMode(false); assertEquals( false, Configuration.isSecureMode()); @@ -235,7 +235,7 @@ public void testGetOldNodeValuesForParameters() { initializeConfiguration(); Set config = new HashSet<>(); config.add("ll"); - HashMap oldValuesMap = Configuration.getOldNodeValuesForParameters(config, Configuration.getCurrentConfig()); + HashMap oldValuesMap = Configuration.getOldNodeValuesForParameters(config); for (HashMap.Entry element : 
oldValuesMap.entrySet()) { assertEquals( Configuration.getLogLevel(), element.getValue()); } @@ -284,8 +284,8 @@ public void testResetToDefault() { assertEquals(GpsMode.DYNAMIC, Configuration.getGpsMode()); Configuration.setPostDiagnosticsFreq(60); assertEquals(60, Configuration.getPostDiagnosticsFreq()); - Configuration.setFogType(ArchitectureType.ARM); - assertEquals(ArchitectureType.ARM, Configuration.getFogType()); + Configuration.setArch(ArchitectureType.ARM); + assertEquals(ArchitectureType.ARM, Configuration.getArch()); Configuration.setSecureMode(false); Assertions.assertFalse(Configuration.isSecureMode()); Configuration.setIpAddressExternal("ipExternal"); @@ -1351,10 +1351,10 @@ public void testSetConfigForGPSModeWithInValidCoordinates() { } /** - * Test setConfig when FOG_TYPE with invalid value + * Test setConfig when ARCH with invalid value */ @Test - public void testSetConfigForFogTypeWithInValidValue() { + public void testSetConfigForArchWithInValidValue() { try { String value = "value"; initializeConfiguration(); @@ -1372,10 +1372,10 @@ public void testSetConfigForFogTypeWithInValidValue() { } /** - * Test setConfig when FOG_TYPE with valid value + * Test setConfig when ARCH with valid value */ @Test - public void testSetConfigForFogTypeWithValidValue() { + public void testSetConfigForArchWithValidValue() { try { String value = "auto"; initializeConfiguration(); diff --git a/packaging/iofog-agent/debian.sh b/packaging/iofog-agent/debian.sh index 9cd9716..ebef85f 100644 --- a/packaging/iofog-agent/debian.sh +++ b/packaging/iofog-agent/debian.sh @@ -20,37 +20,13 @@ useradd -r -U -s /usr/bin/nologin iofog-agent usermod -aG adm,sudo iofog-agent echo "Added ioFog-Agent user and group" -if [ -f /etc/iofog-agent/config.xml ]; +if [ -f /etc/iofog-agent/config.yaml ]; then - rm /etc/iofog-agent/config_new.xml + rm /etc/iofog-agent/config_new.yaml else - mv /etc/iofog-agent/config_new.xml /etc/iofog-agent/config.xml + mv /etc/iofog-agent/config_new.yaml 
/etc/iofog-agent/config.yaml fi -echo "Check for config.xml" - -if [ -f /etc/iofog-agent/config-development.xml ]; -then - rm /etc/iofog-agent/config-development_new.xml -else - mv /etc/iofog-agent/config-development_new.xml /etc/iofog-agent/config-development.xml -fi -#echo "Check for config-development.xml" - -if [ -f /etc/iofog-agent/config-production.xml ]; -then - rm /etc/iofog-agent/config-production_new.xml -else - mv /etc/iofog-agent/config-production_new.xml /etc/iofog-agent/config-production.xml -fi -#echo "Check for config-production.xml" - -if [ -f /etc/iofog-agent/config-switcher.xml ]; -then - rm /etc/iofog-agent/config-switcher_new.xml -else - mv /etc/iofog-agent/config-switcher_new.xml /etc/iofog-agent/config-switcher.xml -fi -#echo "Check for config-switcher.xml" +echo "Check for config.yaml" if [ -f /etc/iofog-agent/cert.crt ]; then @@ -60,14 +36,6 @@ else fi echo "Check for cert.crt" -if [ -f /etc/iofog-agent/config-bck.xml ]; -then - rm /etc/iofog-agent/config-bck_new.xml -else - mv /etc/iofog-agent/config-bck_new.xml /etc/iofog-agent/config-bck.xml -fi -echo "Check for config-bck.xml" - /etc/iofog-agent/local-api mkdir -p /var/backups/iofog-agent diff --git a/packaging/iofog-agent/etc/iofog-agent/config-bck_new.xml b/packaging/iofog-agent/etc/iofog-agent/config-bck_new.xml deleted file mode 100644 index 28af678..0000000 --- a/packaging/iofog-agent/etc/iofog-agent/config-bck_new.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - - - http://localhost:54421/api/v3/ - - - - on - - /etc/iofog-agent/cert.crt - - auto - - dynamic - - unix:///var/run/docker.sock - - 10 - - /var/lib/iofog-agent/ - - 4096 - - 80.0 - - 10.0 - - /var/log/iofog-agent/ - - 10 - - INFO - - 30 - - 60 - - 10 - - 60 - - auto - - 0,0 - - - - 60 - - off - - 0 - - 0 - - 20 - - 24 - - - - - - diff --git a/packaging/iofog-agent/etc/iofog-agent/config-development_new.xml b/packaging/iofog-agent/etc/iofog-agent/config-development_new.xml deleted file mode 100644 index ef565fe..0000000 
--- a/packaging/iofog-agent/etc/iofog-agent/config-development_new.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - - - http://localhost:51121/api/v3/ - - - off - on - - /etc/iofog-agent/cert.crt - - auto - - dynamic - - unix:///var/run/docker.sock - - 10 - - /var/lib/iofog-agent/ - - 4096 - - 80.0 - - 10.0 - - /var/log/iofog-agent/ - - 10 - - INFO - - 30 - - 60 - - 10 - - 60 - - auto - - 0,0 - - - - 60 - - off - - 0 - - 0 - - 20 - - 24 - - - - - - diff --git a/packaging/iofog-agent/etc/iofog-agent/config-production_new.xml b/packaging/iofog-agent/etc/iofog-agent/config-production_new.xml deleted file mode 100644 index c9fbcf7..0000000 --- a/packaging/iofog-agent/etc/iofog-agent/config-production_new.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - - - - - - - http://localhost:54421/api/v3/ - - - - on - off - - /etc/iofog-agent/cert.crt - - auto - - dynamic - - unix:///var/run/docker.sock - - 10 - - /var/lib/iofog-agent/ - - 4096 - - 80.0 - - 10.0 - - /var/log/iofog-agent/ - - 10 - - INFO - - 30 - - 60 - - 10 - - 60 - - auto - - 0,0 - - - - 60 - - off - - 0 - - 0 - - 20 - - 24 - - - - - - diff --git a/packaging/iofog-agent/etc/iofog-agent/config-switcher_new.xml b/packaging/iofog-agent/etc/iofog-agent/config-switcher_new.xml deleted file mode 100644 index c281148..0000000 --- a/packaging/iofog-agent/etc/iofog-agent/config-switcher_new.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - - - default - \ No newline at end of file diff --git a/packaging/iofog-agent/etc/iofog-agent/config_new.xml b/packaging/iofog-agent/etc/iofog-agent/config_new.xml deleted file mode 100644 index 045a265..0000000 --- a/packaging/iofog-agent/etc/iofog-agent/config_new.xml +++ /dev/null @@ -1,82 +0,0 @@ - - - - - - - - - - http://localhost:54421/api/v3/ - - - - off - off - - /etc/iofog-agent/cert.crt - - auto - - dynamic - - unix:///var/run/docker.sock - - 10 - - /var/lib/iofog-agent/ - - 4096 - - 80.0 - - 10.0 - - /var/log/iofog-agent/ - - 10 - - INFO - - 30 - - 60 - - 10 - - 60 - - auto - - 0,0 - - - 
- 60 - - off - - 0 - - 0 - - 20 - - 24 - - - - - - diff --git a/packaging/iofog-agent/etc/iofog-agent/config_new.yaml b/packaging/iofog-agent/etc/iofog-agent/config_new.yaml new file mode 100644 index 0000000..915a2d2 --- /dev/null +++ b/packaging/iofog-agent/etc/iofog-agent/config_new.yaml @@ -0,0 +1,138 @@ +# /******************************************************************************** +# * Copyright (c) 2023 Datasance Teknoloji A.S. +# * +# * This program and the accompanying materials are made available under the +# * terms of the Eclipse Public License v. 2.0 which is available at +# * http://www.eclipse.org/legal/epl-2.0 +# * +# * SPDX-License-Identifier: EPL-2.0 +# ********************************************************************************/ +# +# IOFog Agent Configuration +# This file contains all configuration profiles in a single YAML file + +currentProfile: default # default | development | production + +profiles: + default: + privateKey: "" + routerHost: "" + routerPort: "" + routerUuid: "" + controllerUrl: "http://localhost:54421/api/v3/" + iofogUuid: "" + secureMode: "off" + devMode: "off" + controllerCert: "/etc/iofog-agent/cert.crt" + arch: "auto" + networkInterface: "dynamic" + dockerUrl: "unix:///var/run/docker.sock" + diskConsumptionLimit: "10" + diskDirectory: "/var/lib/iofog-agent/" + memoryConsumptionLimit: "4096" + processorConsumptionLimit: "80.0" + logDiskConsumptionLimit: "10.0" + logDiskDirectory: "/var/log/iofog-agent/" + logFileCount: "10" + logLevel: "INFO" + statusUpdateFreq: "30" + getChangesFreq: "60" + postDiagnosticsFreq: "10" + scanDevicesFreq: "60" + gps: "auto" + gpsCoordinates: "0,0" + gpsDevice: "" + gpsScanFreq: "60" + isolatedDockerContainer: "off" + edgeGuardFreq: "0" + dockerPruningFreq: "0" + availableDiskThreshold: "20" + upgradeScanFrequency: "24" + timeZone: "" + namespace: "default" + caCert: "" + tlsCert: "" + tlsKey: "" + hwSignature: "" + + development: + privateKey: "" + routerHost: "" + routerPort: "" + 
routerUuid: "" + controllerUrl: "http://localhost:51121/api/v3/" + iofogUuid: "" + secureMode: "off" + devMode: "on" + controllerCert: "/etc/iofog-agent/cert.crt" + arch: "auto" + networkInterface: "dynamic" + dockerUrl: "unix:///var/run/docker.sock" + diskConsumptionLimit: "10" + diskDirectory: "/var/lib/iofog-agent/" + memoryConsumptionLimit: "4096" + processorConsumptionLimit: "80.0" + logDiskConsumptionLimit: "10.0" + logDiskDirectory: "/var/log/iofog-agent/" + logFileCount: "10" + logLevel: "INFO" + statusUpdateFreq: "30" + getChangesFreq: "60" + postDiagnosticsFreq: "10" + scanDevicesFreq: "60" + gps: "auto" + gpsCoordinates: "0,0" + gpsDevice: "" + gpsScanFreq: "60" + isolatedDockerContainer: "off" + edgeGuardFreq: "0" + dockerPruningFreq: "0" + availableDiskThreshold: "20" + upgradeScanFrequency: "24" + timeZone: "" + namespace: "default" + caCert: "" + tlsCert: "" + tlsKey: "" + hwSignature: "" + + production: + privateKey: "" + routerHost: "" + routerPort: "" + routerUuid: "" + controllerUrl: "http://localhost:54421/api/v3/" + iofogUuid: "" + secureMode: "on" + devMode: "off" + controllerCert: "/etc/iofog-agent/cert.crt" + arch: "auto" + networkInterface: "dynamic" + dockerUrl: "unix:///var/run/docker.sock" + diskConsumptionLimit: "10" + diskDirectory: "/var/lib/iofog-agent/" + memoryConsumptionLimit: "4096" + processorConsumptionLimit: "80.0" + logDiskConsumptionLimit: "10.0" + logDiskDirectory: "/var/log/iofog-agent/" + logFileCount: "10" + logLevel: "INFO" + statusUpdateFreq: "30" + getChangesFreq: "60" + postDiagnosticsFreq: "10" + scanDevicesFreq: "60" + gps: "auto" + gpsCoordinates: "0,0" + gpsDevice: "" + gpsScanFreq: "60" + isolatedDockerContainer: "off" + edgeGuardFreq: "0" + dockerPruningFreq: "0" + availableDiskThreshold: "20" + upgradeScanFrequency: "24" + timeZone: "" + namespace: "default" + caCert: "" + tlsCert: "" + tlsKey: "" + hwSignature: "" diff --git a/packaging/iofog-agent/rpm.sh b/packaging/iofog-agent/rpm.sh index f7b6a09..90a8810 
100644 --- a/packaging/iofog-agent/rpm.sh +++ b/packaging/iofog-agent/rpm.sh @@ -23,37 +23,13 @@ groupadd -r iofog-agent useradd -r -g iofog-agent iofog-agent #echo "Added iofog-agent user and group" -if [ -f /etc/iofog-agent/config.xml ]; +if [ -f /etc/iofog-agent/config.yaml ]; then - rm /etc/iofog-agent/config_new.xml + rm /etc/iofog-agent/config_new.yaml else - mv /etc/iofog-agent/config_new.xml /etc/iofog-agent/config.xml + mv /etc/iofog-agent/config_new.yaml /etc/iofog-agent/config.yaml fi -#echo "Check for config.xml" - -if [ -f /etc/iofog-agent/config-development.xml ]; -then - rm /etc/iofog-agent/config-development_new.xml -else - mv /etc/iofog-agent/config-development_new.xml /etc/iofog-agent/config-development.xml -fi -#echo "Check for config-development.xml" - -if [ -f /etc/iofog-agent/config-production.xml ]; -then - rm /etc/iofog-agent/config-production_new.xml -else - mv /etc/iofog-agent/config-production_new.xml /etc/iofog-agent/config-production.xml -fi -#echo "Check for config-production.xml" - -if [ -f /etc/iofog-agent/config-switcher.xml ]; -then - rm /etc/iofog-agent/config-switcher_new.xml -else - mv /etc/iofog-agent/config-switcher_new.xml /etc/iofog-agent/config-switcher.xml -fi -#echo "Check for config-switcher.xml" +#echo "Check for config.yaml" if [ -f /etc/iofog-agent/cert.crt ]; then @@ -61,15 +37,7 @@ then else mv /etc/iofog-agent/cert_new.crt /etc/iofog-agent/cert.crt fi -#echo "Check for config.xml" - -if [ -f /etc/iofog-agent/config-bck.xml ]; -then - rm /etc/iofog-agent/config-bck_new.xml -else - mv /etc/iofog-agent/config-bck_new.xml /etc/iofog-agent/config-bck.xml -fi -#echo "Check for config-bck.xml" +#echo "Check for cert.crt" /etc/iofog-agent/local-api