Mirror of https://github.com/runcitadel/core.git (synced 2024-11-11 16:30:38 +00:00)

Merge branch 'feat/quick-updates' into feat/quick-updates-c-ln

Commit 7f4687cd82

@@ -3,9 +3,9 @@
 # SPDX-License-Identifier: GPL-3.0-or-later

 # A collection of fully FLOSS app definitions and FLOSS apps for Citadel.
-https://github.com/runcitadel/apps v3-beta
+https://github.com/runcitadel/apps v3-stable

 # Some apps modified version of Umbrel apps, and their app definitions aren't FLOSS yet.
 # Include them anyway, but as a separate repo.
 # Add a # to the line below to disable the repo and only use FLOSS apps.
-https://github.com/runcitadel/apps-nonfree v3-beta
+https://github.com/runcitadel/apps-nonfree v3-stable

@@ -1,24 +0,0 @@
-from lib.citadelutils import classToDict
-from lib.composegenerator.shared.env import validateEnv
-
-from lib.composegenerator.v3.types import App, generateApp
-from lib.composegenerator.v3.generate import convertContainerPermissions
-
-def createCleanConfigFromV3(app: dict, nodeRoot: str):
-    parsedApp: App = generateApp(app)
-    for container in range(len(parsedApp.containers)):
-        # TODO: Make this dynamic and not hardcoded
-        if parsedApp.containers[container].requires and "c-lightning" in parsedApp.containers[container].requires:
-            parsedApp.containers[container] = None
-    parsedApp = convertContainerPermissions(parsedApp)
-    parsedApp = validateEnv(parsedApp)
-    finalApp = classToDict(parsedApp)
-    try:
-        finalApp['permissions'] = finalApp['metadata']['dependencies']
-    except:
-        finalApp['permissions'] = []
-    finalApp['id'] = finalApp['metadata']['id']
-    del finalApp['metadata']
-    # Set version of the cache file format
-    finalApp['version'] = "1"
-    return finalApp

@@ -54,6 +54,59 @@ def getFreePort(networkingFile: str, appId: str):

     return port

+def assignIpV4(appId: str, containerName: str):
+    scriptDir = path.dirname(path.realpath(__file__))
+    nodeRoot = path.join(scriptDir, "..", "..", "..", "..")
+    networkingFile = path.join(nodeRoot, "apps", "networking.json")
+    envFile = path.join(nodeRoot, ".env")
+    cleanContainerName = containerName.strip()
+    # If the name still contains a newline, throw an error
+    if cleanContainerName.find("\n") != -1:
+        raise Exception("Newline in container name")
+    env_var = "APP_{}_{}_IP".format(
+        appId.upper().replace("-", "_"),
+        cleanContainerName.upper().replace("-", "_")
+    )
+    # Write a list of used IPs to the usedIpFile as JSON, and read that file to check if an IP
+    # can be used
+    usedIps = []
+    networkingData = {}
+    if path.isfile(networkingFile):
+        with open(networkingFile, 'r') as f:
+            networkingData = json.load(f)
+
+    if 'ip_addresses' in networkingData:
+        usedIps = list(networkingData['ip_addresses'].values())
+    else:
+        networkingData['ip_addresses'] = {}
+    # An IP 10.21.21.xx, with x being a random number above 40 is asigned to the container
+    # If the IP is already in use, it will be tried again until it's not in use
+    # If it's not in use, it will be added to the usedIps list and written to the usedIpFile
+    # If the usedIpsFile contains all IPs between 10.21.21.20 and 10.21.21.255 (inclusive),
+    # Throw an error, because no more IPs can be used
+    if len(usedIps) == 235:
+        raise Exception("No more IPs can be used")
+
+    if "{}-{}".format(appId, cleanContainerName) in networkingData['ip_addresses']:
+        ip = networkingData['ip_addresses']["{}-{}".format(
+            appId, cleanContainerName)]
+    else:
+        while True:
+            ip = "10.21.21." + str(random.randint(20, 255))
+            if ip not in usedIps:
+                networkingData['ip_addresses']["{}-{}".format(
+                    appId, cleanContainerName)] = ip
+                break
+
+    dotEnv = parse_dotenv(envFile)
+    if env_var in dotEnv and str(dotEnv[env_var]) == str(ip):
+        return
+
+    with open(envFile, 'a') as f:
+        f.write("{}={}\n".format(env_var, ip))
+    with open(networkingFile, 'w') as f:
+        json.dump(networkingData, f)
+
 def assignIp(container: ContainerStage2, appId: str, networkingFile: str, envFile: str) -> ContainerStage2:
     # Strip leading/trailing whitespace from container.name
     container.name = container.name.strip()

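How the new helper is meant to be used shows up later in this diff (getPortsV4App calls it once per container). A minimal usage sketch, with a made-up app id and container name; the file locations follow from the paths computed above:

    # Hypothetical call; "example-app" and "web" are illustrative names only.
    from lib.composegenerator.shared.networking import assignIpV4

    assignIpV4("example-app", "web")
    # Expected side effects, relative to the node root derived above:
    #   apps/networking.json gains {"ip_addresses": {"example-app-web": "10.21.21.x"}}
    #   .env gains a line        APP_EXAMPLE_APP_WEB_IP=10.21.21.x
    # Re-running with the same ids is a no-op once the .env entry already matches.
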
@@ -97,7 +97,7 @@ def createComposeConfigFromV3(app: dict, nodeRoot: str):
         del container.requiredPorts
     for container in newApp.containers:
         for udpPort in container.requiredUdpPorts:
-            container.ports.append("{}/udp:{}/udp".format(udpPort, udpPort))
+            container.ports.append("{}:{}/udp".format(udpPort, udpPort))
         del container.requiredUdpPorts
     newApp = configureMainPort(newApp, nodeRoot)
     newApp = configureHiddenServices(newApp, nodeRoot)

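The small change above matters because Docker Compose's short port syntax is host:container with the protocol suffix only on the container side. A quick illustration of the two format strings:

    # Illustration of the old vs. new mapping string for udpPort = 8333.
    udpPort = 8333
    old = "{}/udp:{}/udp".format(udpPort, udpPort)  # "8333/udp:8333/udp" - not valid short syntax
    new = "{}:{}/udp".format(udpPort, udpPort)      # "8333:8333/udp"     - valid host:container/udp
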
@@ -6,9 +6,11 @@ import stat
 import sys
 import tempfile
 import threading
+import random
 from typing import List
 from sys import argv
 import os
+import fcntl
 import requests
 import shutil
 import json

@@ -31,9 +33,31 @@ from lib.validate import findAndValidateApps
 from lib.metadata import getAppRegistry
 from lib.entropy import deriveEntropy

+class FileLock:
+    """Implements a file-based lock using flock(2).
+    The lock file is saved in directory dir with name lock_name.
+    dir is the current directory by default.
+    """
+
+    def __init__(self, lock_name, dir="."):
+        self.lock_file = open(os.path.join(dir, lock_name), "w")
+
+    def acquire(self, blocking=True):
+        """Acquire the lock.
+        If the lock is not already acquired, return None. If the lock is
+        acquired and blocking is True, block until the lock is released. If
+        the lock is acquired and blocking is False, raise an IOError.
+        """
+        ops = fcntl.LOCK_EX
+        if not blocking:
+            ops |= fcntl.LOCK_NB
+        fcntl.flock(self.lock_file, ops)
+
+    def release(self):
+        """Release the lock. Return None even if lock not currently acquired"""
+        fcntl.flock(self.lock_file, fcntl.LOCK_UN)
+
 # For an array of threads, join them and wait for them to finish
 def joinThreads(threads: List[threading.Thread]):
     for thread in threads:
         thread.join()

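A short sketch of how this lock is used; the lock name below is an example, while handleAppV4 further down uses it around the registry.json read-modify-write:

    # Example only: serialize a read-modify-write across processes.
    lock = FileLock("example.lock", dir="/tmp")
    lock.acquire()                 # blocks until no other process holds /tmp/example.lock
    try:
        pass                       # touch the shared file here
    finally:
        lock.release()             # LOCK_UN lets the next waiter proceed

    # lock.acquire(blocking=False) raises OSError/BlockingIOError if the lock is already
    # held, which is the behaviour of flock(2) with LOCK_NB.
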
@@ -49,16 +73,48 @@ updateIgnore = os.path.join(appsDir, ".updateignore")
 appDataDir = os.path.join(nodeRoot, "app-data")
 userFile = os.path.join(nodeRoot, "db", "user.json")
 legacyScript = os.path.join(nodeRoot, "scripts", "app")
+with open(os.path.join(nodeRoot, "db", "dependencies.yml"), "r") as file:
+    dependencies = yaml.safe_load(file)
+

 # Returns a list of every argument after the second one in sys.argv joined into a string by spaces
 def getArguments():
     arguments = ""
     for i in range(3, len(argv)):
         arguments += argv[i] + " "
     return arguments

+def handleAppV4(app):
+    composeFile = os.path.join(appsDir, app, "docker-compose.yml")
+    os.chown(os.path.join(appsDir, app), 1000, 1000)
+    os.system("docker run --rm -v {}:/apps -u 1000:1000 {} /app-cli convert --app-name '{}' --port-map /apps/ports.json /apps/{}/app.yml /apps/{}/result.yml --services 'c-lightning'".format(appsDir, dependencies['app-cli'], app, app, app))
+    with open(os.path.join(appsDir, app, "result.yml"), "r") as resultFile:
+        resultYml = yaml.safe_load(resultFile)
+    with open(composeFile, "w") as dockerComposeFile:
+        yaml.dump(resultYml["spec"], dockerComposeFile)
+    torDaemons = ["torrc-apps", "torrc-apps-2", "torrc-apps-3"]
+    torFileToAppend = torDaemons[random.randint(0, len(torDaemons) - 1)]
+    with open(os.path.join(nodeRoot, "tor", torFileToAppend), 'a') as f:
+        f.write(resultYml["new_tor_entries"])
+    mainPort = resultYml["port"]
+    registryFile = os.path.join(nodeRoot, "apps", "registry.json")
+    registry: list = []
+    lock = FileLock("citadeL_registry_lock", dir="/tmp")
+    lock.acquire()
+    if os.path.isfile(registryFile):
+        with open(registryFile, 'r') as f:
+            registry = json.load(f)
+    else:
+        raise Exception("Registry file not found")
+
+    for registryApp in registry:
+        if registryApp['id'] == app:
+            registry[registry.index(registryApp)]['port'] = resultYml["port"]
+            break
+
+    with open(registryFile, 'w') as f:
+        json.dump(registry, f, indent=4, sort_keys=True)
+    lock.release()
+
 def getAppYml(name):
     with open(os.path.join(appsDir, "sourceMap.json"), "r") as f:

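handleAppV4 only ever reads three keys from result.yml, so the file app-cli writes is expected to look roughly like this. The shape is inferred from the reads above, not from app-cli itself, shown as the Python dict yaml.safe_load would return:

    # Hypothetical resultYml, inferred from the keys handleAppV4 accesses.
    resultYml = {
        "spec": {  # dumped verbatim as the app's docker-compose.yml
            "services": {"web": {"image": "example/image:1.0"}},
        },
        "new_tor_entries": "HiddenServiceDir /var/lib/tor/app-example-app\nHiddenServicePort 80 10.21.21.x:3000\n",
        "port": 3000,  # copied into apps/registry.json for this app
    }
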
@@ -88,6 +144,7 @@ def update(verbose: bool = False):
         json.dump(registry["ports"], f, sort_keys=True)
     print("Wrote registry to registry.json")

+    threads = list()
     # Loop through the apps and generate valid compose files from them, then put these into the app dir
     for app in apps:
         try:

@@ -96,9 +153,9 @@ def update(verbose: bool = False):
             with open(appYml, 'r') as f:
                 appDefinition = yaml.safe_load(f)
             if 'citadel_version' in appDefinition:
-                os.chown(os.path.join(appsDir, app), 1000, 1000)
-                print("docker run --rm -v {}:/apps -u 1000:1000 ghcr.io/runcitadel/app-cli:main /app-cli convert --app-name '{}' --port-map /apps/ports.json /apps/{}/app.yml /apps/{}/docker-compose.yml --services 'c-lightning'".format(appsDir, app, app, app))
-                os.system("docker run --rm -v {}:/apps -u 1000:1000 ghcr.io/runcitadel/app-cli:main /app-cli convert --app-name '{}' --port-map /apps/ports.json /apps/{}/app.yml /apps/{}/docker-compose.yml --services 'c-lightning'".format(appsDir, app, app, app))
+                thread = threading.Thread(target=handleAppV4, args=(app,))
+                thread.start()
+                threads.append(thread)
             else:
                 appCompose = getApp(appDefinition, app)
                 with open(composeFile, "w") as f:

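Stripped of the app-specific work, the change above is a plain fan-out/fan-in: start one thread per v4 app, collect the handles, and join them once the loop is done (the joinThreads call appears in the next hunk). In isolation:

    import threading

    def work(item):
        print("converting", item)    # stand-in for handleAppV4

    threads = []
    for item in ["app-a", "app-b"]:  # example app ids
        t = threading.Thread(target=work, args=(item,))
        t.start()
        threads.append(t)
    for t in threads:                # equivalent of joinThreads(threads)
        t.join()
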
@@ -110,6 +167,7 @@ def update(verbose: bool = False):
             print("Failed to convert app {}".format(app))
             print(err)

+    joinThreads(threads)
     print("Generated configuration successfully")

@@ -6,9 +6,8 @@ import os
 import yaml
 import traceback

-from lib.composegenerator.next.stage1 import createCleanConfigFromV3
 from lib.composegenerator.v2.networking import getMainContainer
-from lib.composegenerator.shared.networking import getFreePort
+from lib.composegenerator.shared.networking import assignIpV4
 from lib.entropy import deriveEntropy
 from typing import List
 import json

@@ -42,7 +41,6 @@ def getAppRegistry(apps, app_path):
     app_metadata = []
     for app in apps:
         app_yml_path = os.path.join(app_path, app, 'app.yml')
-        app_cache_path = os.path.join(app_path, app, 'app.cache.json')
         if os.path.isfile(app_yml_path):
             try:
                 with open(app_yml_path, 'r') as f:

@@ -65,8 +63,6 @@ def getAppRegistry(apps, app_path):
                     getPortsOldApp(app_yml, app)
                 elif version == 3:
                     getPortsV3App(app_yml, app)
-                    with open(app_cache_path, 'w') as f:
-                        json.dump(createCleanConfigFromV3(app_yml, os.path.dirname(app_path)), f)
                 elif version == 4:
                     getPortsV4App(app_yml, app)
             except Exception as e:

@@ -173,6 +169,9 @@ def getPortsV3App(app, appId):
 def getPortsV4App(app, appId):
     for appContainerName in app["services"].keys():
         appContainer = app["services"][appContainerName]
+        if "enable_networking" in appContainer and not appContainer["enable_networking"]:
+            return
+        assignIpV4(appId, appContainerName)
         if "port" in appContainer:
             validatePort(appContainerName, appContainer, appContainer["port"], appId, 0)
         if "required_ports" in appContainer:

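For context, an app definition that exercises this path would parse to something like the following; the example is made up and only shows the keys getPortsV4App touches:

    # Hypothetical parsed app.yml (v4); the exact schema is not part of this diff.
    app = {
        "services": {
            "web": {"port": 3000},                    # gets an IP via assignIpV4, then validatePort
            "backend": {"enable_networking": False},  # skipped by the new early return
        },
    }
    # getPortsV4App(app, "example-app") would call assignIpV4("example-app", "web").
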
@@ -13,7 +13,7 @@ CITADEL_ROOT="$(readlink -f $(dirname "${BASH_SOURCE[0]}")/..)"
 result=$(docker compose \
   --file "${CITADEL_ROOT}/docker-compose.yml" \
   --env-file "${CITADEL_ROOT}/.env" \
-  exec lnd lncli "$@")
+  exec lightning lncli "$@")

 # We need to echo with quotes to preserve output formatting
 echo "$result"

@@ -1,6 +1,7 @@
 compose: v2.6.0
 bitcoin: 22.0
 lnd: 0.14.3
-dashboard: v0.0.13@sha256:86be4a105e9599163866d2e4f79c4d4ab9725ce56d71d7d7341e4d0ab3441b54
+dashboard: ghcr.io/runcitadel/dashboard:v0.0.15@sha256:a2cf5ad79367fb083db0f61e5a296aafee655c99af0c228680644c248ec674a5
-manager: v0.0.14@sha256:656efb89b5e0e849c40666ae6c4a36cf0def136fe0fab2da52889dacd7c2b688
+manager: ghcr.io/runcitadel/manager:v0.0.15@sha256:9fb5a86d9e40a04f93d5b6110d43a0f9a5c4ad6311a843b5442290013196a5ce
-middleware: v0.0.10@sha256:afd6e2b6f5ba27cde32f6f6d630ddc6dd46d1072871f7834d7424d0554d0f53d
+middleware: ghcr.io/runcitadel/middleware:v0.0.11@sha256:e472da8cbfa67d9a9dbf321334fe65cdf20a0f9b6d6bab33fdf07210f54e7002
+app-cli: ghcr.io/runcitadel/app-cli:main@sha256:f532923eac28cfac03579cbb440397bcf16c8730f291b39eeada8278331f7054

@@ -100,7 +100,7 @@ services:
         ipv4_address: $LND_IP
   dashboard:
     container_name: dashboard
-    image: ghcr.io/runcitadel/dashboard:v0.0.13@sha256:86be4a105e9599163866d2e4f79c4d4ab9725ce56d71d7d7341e4d0ab3441b54
+    image: ghcr.io/runcitadel/dashboard:v0.0.15@sha256:a2cf5ad79367fb083db0f61e5a296aafee655c99af0c228680644c248ec674a5
     restart: on-failure
     stop_grace_period: 1m30s
     networks:

@@ -108,7 +108,7 @@ services:
         ipv4_address: $DASHBOARD_IP
   manager:
     container_name: manager
-    image: ghcr.io/runcitadel/manager:v0.0.14@sha256:656efb89b5e0e849c40666ae6c4a36cf0def136fe0fab2da52889dacd7c2b688
+    image: ghcr.io/runcitadel/manager:v0.0.15@sha256:9fb5a86d9e40a04f93d5b6110d43a0f9a5c4ad6311a843b5442290013196a5ce
     depends_on:
       - tor
       - redis

@@ -162,7 +162,7 @@ services:
         ipv4_address: $MANAGER_IP
   middleware:
     container_name: middleware
-    image: ghcr.io/runcitadel/middleware:v0.0.10@sha256:afd6e2b6f5ba27cde32f6f6d630ddc6dd46d1072871f7834d7424d0554d0f53d
+    image: ghcr.io/runcitadel/middleware:v0.0.11@sha256:e472da8cbfa67d9a9dbf321334fe65cdf20a0f9b6d6bab33fdf07210f54e7002
     depends_on:
       - manager
       - bitcoin

@@ -1,6 +1,6 @@
 {
-  "version": "0.0.5-rc.1",
+  "version": "0.0.5",
-  "name": "Citadel 0.0.5-rc.1",
+  "name": "Citadel 0.0.5",
   "requires": ">=0.0.1",
   "notes": "This update fixes a few bugs in the 0.0.4 release that were preventing some apps from working correctly."
 }

scripts/configure (vendored, 9 lines changed)

@@ -400,6 +400,15 @@ print("Generated configuration files\n")
 print("Checking if Docker Compose is installed...")
 download_docker_compose()

+print("Updating core services...")
+print()
+with open("docker-compose.yml", 'r') as stream:
+    compose = yaml.safe_load(stream)
+for service in ["manager", "middleware", "dashboard"]:
+    compose["services"][service]["image"] = dependencies[service]
+with open("docker-compose.yml", "w") as stream:
+    yaml.dump(compose, stream, sort_keys=False)
+
 if not reconfiguring:
     print("Updating apps...\n")
     os.system('./scripts/app --invoked-by-configure update')

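This ties scripts/configure to the db/dependencies.yml change earlier in the diff: the pinned image references are copied straight into docker-compose.yml. Roughly, using the values from this commit:

    # dependencies comes from yaml.safe_load on db/dependencies.yml.
    dependencies = {
        "dashboard": "ghcr.io/runcitadel/dashboard:v0.0.15@sha256:a2cf5ad79367fb083db0f61e5a296aafee655c99af0c228680644c248ec674a5",
        "manager": "ghcr.io/runcitadel/manager:v0.0.15@sha256:9fb5a86d9e40a04f93d5b6110d43a0f9a5c4ad6311a843b5442290013196a5ce",
        "middleware": "ghcr.io/runcitadel/middleware:v0.0.11@sha256:e472da8cbfa67d9a9dbf321334fe65cdf20a0f9b6d6bab33fdf07210f54e7002",
    }
    compose = {"services": {name: {"image": "old-reference"} for name in dependencies}}
    for service in ["manager", "middleware", "dashboard"]:
        compose["services"][service]["image"] = dependencies[service]
    # compose now matches the image: lines shown in the docker-compose.yml hunks above.
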
@@ -21,7 +21,17 @@ function get_memory_usage() {
 function mem_usage_to_percent() {
   local mem_usage="$1"
   local total_mem="$(free -m | awk 'NR==2 {print $2}')"
-  echo "$(awk "BEGIN {printf \"%.1f\", $mem_usage / $total_mem * 100}")"
+  echo "$(awk "BEGIN {printf \"%.1f\", ${mem_usage/,/.} / ${total_mem/,/.} * 100}")"
+}
+
+function app_mem_usage() {
+  # For every container of the app, get the mem usage, save it, and at the end, print the total mem usage of the app
+  local mem_usage=0
+  for container in $(get_app_containers "$1"); do
+    # Use awk to add, it supports floating point numbers
+    mem_usage=$(awk "BEGIN {printf \"%.2f\", $mem_usage + $(get_memory_usage "$container")}")
+  done
+  echo "${1}: $mem_usage%"
 }

 get_total_used_mem_raw() {

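The "${mem_usage/,/.}" and "${total_mem/,/.}" substitutions guard against locales where the underlying tools print decimal commas, which awk arithmetic does not treat as a fractional number. The same normalization, written out in Python terms:

    # Illustration of the locale issue the bash substitution works around.
    raw = "1,5"                            # e.g. a memory figure printed with a decimal comma
    value = float(raw.replace(",", "."))   # what ${raw/,/.} does in bash
    print(value)                           # 1.5, instead of the value being truncated to 1
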
@@ -35,7 +45,7 @@ get_total_used_mem() {
 # To get the containers of the app, list every container whose name starts with the name of the app
 get_app_containers () {
   local app_name="$1"
-  "${CITADEL_ROOT}/scripts/app" compose "${app_name}" ps | awk '{print $1}' | grep -v 'Name\|-----'
+  "${CITADEL_ROOT}/scripts/app" compose "${app_name}" ps | awk '{print $1}' | grep -v 'NAME'
 }

 # Get the memory usage of the whole system, excluding docker containers

@@ -48,22 +58,17 @@ get_system_memory_usage() {
 }

 main() {
-  echo "total: $(get_total_used_mem)%"&
+  echo "total: $(get_total_used_mem)%"
+  echo "system: $(get_system_memory_usage)%"
   for service in bitcoin lightning electrum tor; do
     echo "${service}: $(get_memory_usage "$service")%" &
   done
   for app in $("${CITADEL_ROOT}/scripts/app" ls-installed); do
-    # For every container of the app, get the mem usage, save it, and at the end, print the total mem usage of the app
-    local mem_usage=0
-    for container in $(get_app_containers "$app"); do
-      # Use awk to add, it supports floating point numbers
-      mem_usage=$(awk "BEGIN {printf \"%.2f\", $mem_usage + $(get_memory_usage "$container")}")
-    done
-    echo "${app}: $mem_usage%"
+    app_mem_usage "${app}" &
   done
-  echo "system: $(get_system_memory_usage)%"
   wait
 }

+echo "Calculating memory usage..."
+echo "This may take a while, please wait..."
 main | sort --key 2 --numeric-sort --reverse

@@ -90,7 +90,7 @@ echo
 echo "Starting status monitors..."
 echo
 pkill -f ./scripts/status-monitor || true
-./scripts/status-monitor memory 60 &>> "${CITADEL_LOGS}/status-monitor.log" &
+./scripts/status-monitor memory 300 &>> "${CITADEL_LOGS}/status-monitor.log" &
 ./scripts/status-monitor storage 60 &>> "${CITADEL_LOGS}/status-monitor.log" &
 ./scripts/status-monitor temperature 15 &>> "${CITADEL_LOGS}/status-monitor.log" &
 ./scripts/status-monitor uptime 15 &>> "${CITADEL_LOGS}/status-monitor.log" &

@@ -4,6 +4,9 @@
 #
 # SPDX-License-Identifier: GPL-3.0-or-later

+# To prevent tools we use from outputting in another language
+LANG=C
+
 CITADEL_ROOT="$(readlink -f $(dirname "${BASH_SOURCE[0]}")/../..)"

 memory_total_bytes() {

||||||
|
|
||||||
local app_memory=0
|
local app_memory=0
|
||||||
|
|
||||||
local app_containers=$("${CITADEL_ROOT}/app/app-manager.py" compose "${app}" ps | awk '{print $1}' | grep -v 'Name\|-----')
|
local app_containers=$("${CITADEL_ROOT}/scripts/app" compose "${app}" ps | awk '{print $1}' | grep -v 'NAME')
|
||||||
for container in $app_containers; do
|
for container in $app_containers; do
|
||||||
local container_memory=$(get_container_memory_use "${container}")
|
local container_memory=$(get_container_memory_use "${container}")
|
||||||
app_memory=$(awk "BEGIN {print ${app_memory}+${container_memory}}")
|
app_memory=$(awk "BEGIN {print ${app_memory}+${container_memory}}")
|
||||||
|
@@ -43,7 +46,7 @@ used=$(memory_used_bytes)
 json=$(echo $json | jq --arg used "${used}" '. + {used: $used|tonumber}')

 cumulative_app_memory="0"
-for app in $( "${CITADEL_ROOT}/app/app-manager.py" ls-installed); do
+for app in $( "${CITADEL_ROOT}/scripts/app" ls-installed); do
   app_memory=$(get_app_memory_use "${app}")
   cumulative_app_memory=$(($cumulative_app_memory+$app_memory))
   json=$(echo $json | jq --arg app "${app}" --arg app_memory "${app_memory}" '.breakdown |= .+ [{id: $app, used: $app_memory|tonumber}]')

@@ -31,6 +31,21 @@ args = parser.parse_args()
 # Function to install a service
 # To install it, read the service's YAML file (nodeRoot/services/name.yml) and add it to the main compose file (nodeRoot/docker-compose.yml)
 def setService(name, implementation):
+    # Get all available services
+    services = next(os.walk(os.path.join(nodeRoot, "services")))[1]
+
+    if not name in services:
+        print("\"{}\" is not a valid service.".format(name))
+        exit(1)
+
+    # Get all available implementations
+    implementations = next(os.walk(os.path.join(nodeRoot, "services", name)), (None, None, []))[2]
+    implementations = [x.split('.')[0] for x in implementations]
+
+    if not implementation in implementations:
+        print("\"{}\" is not a valid implementation.".format(implementation))
+        exit(1)
+
     # Read the YAML file
     with open(os.path.join(nodeRoot, "services", name, implementation + ".yml"), 'r') as stream:
         service = yaml.safe_load(stream)

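The validation added here implies a services/<name>/<implementation>.yml layout under the node root. A sketch of how the two os.walk calls resolve names, using hypothetical directory and file names that are not taken from this diff:

    import os

    # Hypothetical layout: services/lightning/{lnd,c-lightning}.yml
    nodeRoot = "/home/citadel/citadel"   # example path
    services = next(os.walk(os.path.join(nodeRoot, "services")))[1]
    files = next(os.walk(os.path.join(nodeRoot, "services", "lightning")), (None, None, []))[2]
    implementations = [x.split('.')[0] for x in files]
    # services        -> e.g. ["lightning"]
    # implementations -> e.g. ["lnd", "c-lightning"]
    # setService("lightning", "c-lightning") would then load services/lightning/c-lightning.yml.
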