Mirror of https://github.com/runcitadel/core.git
Implement quick updates (#56)
Co-authored-by: nolim1t - f6287b82CC84bcbd <nolim1t@users.noreply.github.com>
Co-authored-by: Philipp Walter <philippwalter@pm.me>
parent 61f5f9f1e0
commit cea004770c
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later

# A collection of fully FLOSS app definitions and FLOSS apps for Citadel.
https://github.com/runcitadel/apps v3-stable
https://github.com/runcitadel/apps v4-beta

# Some apps modified version of Umbrel apps, and their app definitions aren't FLOSS yet.
# Include them anyway, but as a separate repo.

@@ -1,24 +0,0 @@
from lib.citadelutils import classToDict
from lib.composegenerator.shared.env import validateEnv

from lib.composegenerator.v3.types import App, generateApp
from lib.composegenerator.v3.generate import convertContainerPermissions

def createCleanConfigFromV3(app: dict, nodeRoot: str):
    parsedApp: App = generateApp(app)
    for container in range(len(parsedApp.containers)):
        # TODO: Make this dynamic and not hardcoded
        if parsedApp.containers[container].requires and "c-lightning" in parsedApp.containers[container].requires:
            parsedApp.containers[container] = None
    parsedApp = convertContainerPermissions(parsedApp)
    parsedApp = validateEnv(parsedApp)
    finalApp = classToDict(parsedApp)
    try:
        finalApp['permissions'] = finalApp['metadata']['dependencies']
    except:
        finalApp['permissions'] = []
    finalApp['id'] = finalApp['metadata']['id']
    del finalApp['metadata']
    # Set version of the cache file format
    finalApp['version'] = "1"
    return finalApp

@@ -6,9 +6,11 @@ import stat
import sys
import tempfile
import threading
import random
from typing import List
from sys import argv
import os
import fcntl
import requests
import shutil
import json

@@ -32,9 +34,31 @@ from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry
from lib.entropy import deriveEntropy

class FileLock:
    """Implements a file-based lock using flock(2).
    The lock file is saved in directory dir with name lock_name.
    dir is the current directory by default.
    """

    def __init__(self, lock_name, dir="."):
        self.lock_file = open(os.path.join(dir, lock_name), "w")

    def acquire(self, blocking=True):
        """Acquire the lock.
        If the lock is not already acquired, return None. If the lock is
        acquired and blocking is True, block until the lock is released. If
        the lock is acquired and blocking is False, raise an IOError.
        """
        ops = fcntl.LOCK_EX
        if not blocking:
            ops |= fcntl.LOCK_NB
        fcntl.flock(self.lock_file, ops)

    def release(self):
        """Release the lock. Return None even if lock not currently acquired"""
        fcntl.flock(self.lock_file, fcntl.LOCK_UN)

# For an array of threads, join them and wait for them to finish

def joinThreads(threads: List[threading.Thread]):
    for thread in threads:
        thread.join()

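A minimal usage sketch of the FileLock helper added above, mirroring how handleAppV4 below serializes access to registry.json; the lock name and the guarded file here are illustrative, not taken from the diff:

# Illustrative usage of the FileLock class above; names are examples only.
lock = FileLock("example_lock", dir="/tmp")
lock.acquire()                      # blocks until any other holder releases the lock
try:
    with open("/tmp/example.json", "w") as f:
        json.dump({"ok": True}, f)  # critical section: only one process writes at a time
finally:
    lock.release()                  # unlock even if the write raises
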
@@ -50,26 +74,58 @@ updateIgnore = os.path.join(appsDir, ".updateignore")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")
with open(os.path.join(nodeRoot, "db", "dependencies.yml"), "r") as file:
    dependencies = yaml.safe_load(file)


# Returns a list of every argument after the second one in sys.argv joined into a string by spaces

def getArguments():
    arguments = ""
    for i in range(3, len(argv)):
        arguments += argv[i] + " "
    return arguments

def handleAppV4(app):
    composeFile = os.path.join(appsDir, app, "docker-compose.yml")
    os.chown(os.path.join(appsDir, app), 1000, 1000)
    os.system("docker run --rm -v {}:/apps -u 1000:1000 {} /app-cli convert --app-name '{}' --port-map /apps/ports.json /apps/{}/app.yml /apps/{}/result.yml --services 'lnd'".format(appsDir, dependencies['app-cli'], app, app, app))
    with open(os.path.join(appsDir, app, "result.yml"), "r") as resultFile:
        resultYml = yaml.safe_load(resultFile)
    with open(composeFile, "w") as dockerComposeFile:
        yaml.dump(resultYml["spec"], dockerComposeFile)
    torDaemons = ["torrc-apps", "torrc-apps-2", "torrc-apps-3"]
    torFileToAppend = torDaemons[random.randint(0, len(torDaemons) - 1)]
    with open(os.path.join(nodeRoot, "tor", torFileToAppend), 'a') as f:
        f.write(resultYml["new_tor_entries"])
    mainPort = resultYml["port"]
    registryFile = os.path.join(nodeRoot, "apps", "registry.json")
    registry: list = []
    lock = FileLock("citadeL_registry_lock", dir="/tmp")
    lock.acquire()
    if os.path.isfile(registryFile):
        with open(registryFile, 'r') as f:
            registry = json.load(f)
    else:
        raise Exception("Registry file not found")

    for registryApp in registry:
        if registryApp['id'] == app:
            registry[registry.index(registryApp)]['port'] = resultYml["port"]
            break

    with open(registryFile, 'w') as f:
        json.dump(registry, f, indent=4, sort_keys=True)
    lock.release()

def getAppYml(name):
    with open(os.path.join(appsDir, "sourceMap.json"), "r") as f:
        sourceMap = json.load(f)
    if not name in sourceMap:
        print("Warning: App {} is not in the source map".format(name))
        print("Warning: App {} is not in the source map".format(name), file=sys.stderr)
        sourceMap = {
            name: {
                "githubRepo": "runcitadel/core",
                "branch": "v2"
                "githubRepo": "runcitadel/apps",
                "branch": "v4-stable"
            }
        }
    url = 'https://raw.githubusercontent.com/{}/{}/apps/{}/app.yml'.format(sourceMap[name]["githubRepo"], sourceMap[name]["branch"], name)

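As a concrete illustration of the new fallback in getAppYml above, a hypothetical app id (not from the diff) resolves against the runcitadel/apps repo on the v4-stable branch:

# Hypothetical app id, only to show what the format string above produces.
name = "example-app"
url = 'https://raw.githubusercontent.com/{}/{}/apps/{}/app.yml'.format("runcitadel/apps", "v4-stable", name)
# url == "https://raw.githubusercontent.com/runcitadel/apps/v4-stable/apps/example-app/app.yml"
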
@@ -89,16 +145,31 @@ def update(verbose: bool = False):
        json.dump(registry["ports"], f, sort_keys=True)
    print("Wrote registry to registry.json")

    os.system("docker pull {}".format(dependencies['app-cli']))
    threads = list()
    # Loop through the apps and generate valid compose files from them, then put these into the app dir
    for app in apps:
        composeFile = os.path.join(appsDir, app, "docker-compose.yml")
        appYml = os.path.join(appsDir, app, "app.yml")
        with open(composeFile, "w") as f:
            appCompose = getApp(appYml, app)
            if appCompose:
                f.write(yaml.dump(appCompose, sort_keys=False))
                if verbose:
                    print("Wrote " + app + " to " + composeFile)
        try:
            composeFile = os.path.join(appsDir, app, "docker-compose.yml")
            appYml = os.path.join(appsDir, app, "app.yml")
            with open(appYml, 'r') as f:
                appDefinition = yaml.safe_load(f)
            if 'citadel_version' in appDefinition:
                thread = threading.Thread(target=handleAppV4, args=(app,))
                thread.start()
                threads.append(thread)
            else:
                appCompose = getApp(appDefinition, app)
                with open(composeFile, "w") as f:
                    if appCompose:
                        f.write(yaml.dump(appCompose, sort_keys=False))
                        if verbose:
                            print("Wrote " + app + " to " + composeFile)
        except Exception as err:
            print("Failed to convert app {}".format(app))
            print(err)

    joinThreads(threads)
    print("Generated configuration successfully")

@@ -158,12 +229,7 @@ def stopInstalled():
    joinThreads(threads)

# Loads an app.yml and converts it to a docker-compose.yml

def getApp(appFile: str, appId: str):
    with open(appFile, 'r') as f:
        app = yaml.safe_load(f)

def getApp(app, appId: str):
    if not "metadata" in app:
        raise Exception("Error: Could not find metadata in " + appFile)
    app["metadata"]["id"] = appId

@@ -175,6 +241,7 @@ def getApp(appFile: str, appId: str):
        print("Warning: App {} uses version 2 of the app.yml format, which is scheduled for removal in Citadel 0.2.0".format(appId))
        return createComposeConfigFromV2(app, nodeRoot)
    elif 'version' in app and str(app['version']) == "3":
        print("Warning: App {} uses version 3 of the app.yml format, which is scheduled for removal in Citadel 0.3.0".format(appId))
        return createComposeConfigFromV3(app, nodeRoot)
    else:
        raise Exception("Error: Unsupported version of app.yml")

@@ -4,10 +4,10 @@

import os
import yaml
import traceback

from lib.composegenerator.next.stage1 import createCleanConfigFromV3
from lib.composegenerator.v2.networking import getMainContainer
from lib.composegenerator.v1.networking import getFreePort
from lib.composegenerator.shared.networking import assignIpV4
from lib.entropy import deriveEntropy
from typing import List
import json

@@ -41,11 +41,15 @@ def getAppRegistry(apps, app_path):
    app_metadata = []
    for app in apps:
        app_yml_path = os.path.join(app_path, app, 'app.yml')
        app_cache_path = os.path.join(app_path, app, 'app.cache.json')
        if os.path.isfile(app_yml_path):
            try:
                with open(app_yml_path, 'r') as f:
                    app_yml = yaml.safe_load(f.read())
                version = False
                if 'version' in app_yml:
                    version = int(app_yml['version'])
                elif 'citadel_version' in app_yml:
                    version = int(app_yml['citadel_version'])
                metadata: dict = app_yml['metadata']
                metadata['id'] = app
                metadata['path'] = metadata.get('path', '')

@@ -55,14 +59,14 @@ def getAppRegistry(apps, app_path):
                if "mainContainer" in metadata:
                    metadata.pop("mainContainer")
                app_metadata.append(metadata)
                if(app_yml["version"] != 3):
                if version < 3:
                    getPortsOldApp(app_yml, app)
                else:
                elif version == 3:
                    getPortsV3App(app_yml, app)
                    with open(app_cache_path, 'w') as f:
                        json.dump(createCleanConfigFromV3(app_yml, os.path.dirname(app_path)), f)
                elif version == 4:
                    getPortsV4App(app_yml, app)
            except Exception as e:
                print(e)
                print(traceback.format_exc())
                print("App {} is invalid!".format(app))
    appPortsToMap()
    return {

@@ -97,12 +101,12 @@ def getNewPort(usedPorts):
        lastPort2 = lastPort2 + 1
    return lastPort2

def validatePort(appContainer, port, appId, priority: int, isDynamic = False):
def validatePort(containerName, appContainer, port, appId, priority: int, isDynamic = False):
    if port not in appPorts and port not in citadelPorts and port != 0:
        appPorts[port] = {
            "app": appId,
            "port": port,
            "container": appContainer["name"],
            "container": containerName,
            "priority": priority,
            "dynamic": isDynamic,
        }

@@ -115,7 +119,7 @@ def validatePort(appContainer, port, appId, priority: int, isDynamic = False):
        appPorts[port] = {
            "app": appId,
            "port": port,
            "container": appContainer["name"],
            "container": containerName,
            "priority": priority,
            "dynamic": isDynamic,
        }

@@ -128,7 +132,7 @@ def validatePort(appContainer, port, appId, priority: int, isDynamic = False):
        appPorts[newPort] = {
            "app": appId,
            "port": port,
            "container": appContainer["name"],
            "container": containerName,
            "priority": priority,
            "dynamic": isDynamic,
        }

@@ -136,28 +140,44 @@
def getPortsOldApp(app, appId):
    for appContainer in app["containers"]:
        if "port" in appContainer:
            validatePort(appContainer, appContainer["port"], appId, 0)
            validatePort(appContainer["name"], appContainer, appContainer["port"], appId, 0)
        if "ports" in appContainer:
            for port in appContainer["ports"]:
                realPort = int(str(port).split(":")[0])
                validatePort(appContainer, realPort, appId, 2)
                validatePort(appContainer["name"], appContainer, realPort, appId, 2)


def getPortsV3App(app, appId):
    for appContainer in app["containers"]:
        if "port" in appContainer:
            if "preferredOutsidePort" in appContainer and "requiresPort" in appContainer and appContainer["requiresPort"]:
                validatePort(appContainer, appContainer["preferredOutsidePort"], appId, 2)
                validatePort(appContainer["name"], appContainer, appContainer["preferredOutsidePort"], appId, 2)
            elif "preferredOutsidePort" in appContainer:
                validatePort(appContainer, appContainer["preferredOutsidePort"], appId, 1)
                validatePort(appContainer["name"], appContainer, appContainer["preferredOutsidePort"], appId, 1)
            else:
                validatePort(appContainer, appContainer["port"], appId, 0)
                validatePort(appContainer["name"], appContainer, appContainer["port"], appId, 0)
        elif "requiredPorts" not in appContainer and "requiredUdpPorts" not in appContainer:
            validatePort(appContainer, getNewPort(appPorts.keys()), appId, 0, True)
            validatePort(appContainer["name"], appContainer, getNewPort(appPorts.keys()), appId, 0, True)
        if "requiredPorts" in appContainer:
            for port in appContainer["requiredPorts"]:
                validatePort(appContainer, port, appId, 2)
                validatePort(appContainer["name"], appContainer, port, appId, 2)
        if "requiredUdpPorts" in appContainer:
            for port in appContainer["requiredUdpPorts"]:
                validatePort(appContainer, port, appId, 2)
                validatePort(appContainer["name"], appContainer, port, appId, 2)

def getPortsV4App(app, appId):
    for appContainerName in app["services"].keys():
        appContainer = app["services"][appContainerName]
        if "enable_networking" in appContainer and not appContainer["enable_networking"]:
            return
        assignIpV4(appId, appContainerName)
        if "port" in appContainer:
            validatePort(appContainerName, appContainer, appContainer["port"], appId, 0)
        if "required_ports" in appContainer:
            if "tcp" in appContainer["required_ports"]:
                for port in appContainer["required_ports"]["tcp"].keys():
                    validatePort(appContainerName, appContainer, port, appId, 2)
            if "udp" in appContainer["required_ports"]:
                for port in appContainer["required_ports"]["udp"].keys():
                    validatePort(appContainerName, appContainer, port, appId, 2)

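For readability, here is a hedged sketch of the v4 app structure that getPortsV4App above appears to iterate over; the service name, port numbers, and map values are invented for illustration (the code only reads the keys of the tcp/udp maps):

# Illustrative v4 structure only; inferred from the parsing code above.
example_v4_app = {
    "services": {
        "web": {                            # hypothetical service name
            "enable_networking": True,      # False would skip IP and port assignment
            "port": 3000,                   # main port, validated with priority 0
            "required_ports": {
                "tcp": {8333: "p2p"},       # fixed TCP ports, validated with priority 2
                "udp": {5353: "discovery"}, # fixed UDP ports, validated with priority 2
            },
        },
    },
}
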
@@ -6,6 +6,7 @@ import os
import yaml
from jsonschema import validate
import yaml
import traceback

scriptDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")

@@ -33,7 +34,7 @@ def validateApp(app: dict):
            return True
        # Catch and log any errors, and return false
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            return False
    elif 'version' in app and str(app['version']) == "3":
        try:

@@ -41,12 +42,13 @@ def validateApp(app: dict):
            return True
        # Catch and log any errors, and return false
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            return False
    else:
    elif 'version' not in app and 'citadel_version' not in app:
        print("Unsupported app version")
        return False

    else:
        return True

# Read in an app.yml file and pass it to the validation function
# Returns true if valid, false otherwise

@@ -72,14 +74,17 @@ def findApps(dir: str):
def findAndValidateApps(dir: str):
    apps = []
    app_data = {}
    for root, dirs, files in os.walk(dir, topdown=False):
        for name in dirs:
            app_dir = os.path.join(root, name)
            if os.path.isfile(os.path.join(app_dir, "app.yml")):
                apps.append(name)
                # Read the app.yml and append it to app_data
                with open(os.path.join(app_dir, "app.yml"), 'r') as f:
                    app_data[name] = yaml.safe_load(f)
    for subdir in os.scandir(dir):
        if not subdir.is_dir():
            continue
        app_dir = subdir.path
        if os.path.isfile(os.path.join(app_dir, "app.yml")):
            apps.append(subdir.name)
            # Read the app.yml and append it to app_data
            with open(os.path.join(app_dir, "app.yml"), 'r') as f:
                app_data[subdir.name] = yaml.safe_load(f)
        else:
            print("App {} has no app.yml".format(subdir.name))
    # Now validate all the apps using the validateAppFile function by passing the app.yml as an argument to it, if an app is invalid, remove it from the list
    for app in apps:
        appyml = app_data[app]

@@ -113,12 +118,13 @@ def findAndValidateApps(dir: str):
                should_continue=False
        if not should_continue:
            continue
        for container in appyml['containers']:
            if 'permissions' in container:
                for permission in container['permissions']:
                    if permission not in appyml['metadata']['dependencies'] and permission not in ["root", "hw"]:
                        print("WARNING: App {}'s container '{}' requires the '{}' permission, but the app doesn't list it in it's dependencies".format(app, container['name'], permission))
                        apps.remove(app)
                        # Skip to the next iteration of the loop
                        continue
        if 'containers' in appyml:
            for container in appyml['containers']:
                if 'permissions' in container:
                    for permission in container['permissions']:
                        if permission not in appyml['metadata']['dependencies'] and permission not in ["root", "hw"]:
                            print("WARNING: App {}'s container '{}' requires the '{}' permission, but the app doesn't list it in it's dependencies".format(app, container['name'], permission))
                            apps.remove(app)
                            # Skip to the next iteration of the loop
                            continue
    return apps

5  db/dependencies.yml  (Normal file)
@@ -0,0 +1,5 @@
compose: v2.6.0
dashboard: ghcr.io/runcitadel/dashboard:main@sha256:25b6fb413c10f47e186309c8737926c241c0f2bec923b2c08dd837b828f14dbd
manager: ghcr.io/runcitadel/manager:main@sha256:db5775e986d53e762e43331540bb1c05a27b362da94d587c4a4591c981c00ee4
middleware: ghcr.io/runcitadel/middleware:main@sha256:2fbbfb2e818bf0462f74a6aaab192881615ae018e6dcb62a50d05f82ec622cb0
app-cli: ghcr.io/runcitadel/app-cli:main@sha256:f532923eac28cfac03579cbb440397bcf16c8730f291b39eeada8278331f7054

@@ -100,7 +100,7 @@ services:
        ipv4_address: $LND_IP
  dashboard:
    container_name: dashboard
    image: ghcr.io/runcitadel/dashboard:v0.0.15@sha256:a2cf5ad79367fb083db0f61e5a296aafee655c99af0c228680644c248ec674a5
    image: ghcr.io/runcitadel/dashboard:main@sha256:25b6fb413c10f47e186309c8737926c241c0f2bec923b2c08dd837b828f14dbd
    restart: on-failure
    stop_grace_period: 1m30s
    networks:

@@ -108,7 +108,7 @@ services:
        ipv4_address: $DASHBOARD_IP
  manager:
    container_name: manager
    image: ghcr.io/runcitadel/manager:v0.0.15@sha256:9fb5a86d9e40a04f93d5b6110d43a0f9a5c4ad6311a843b5442290013196a5ce
    image: ghcr.io/runcitadel/manager:main@sha256:db5775e986d53e762e43331540bb1c05a27b362da94d587c4a4591c981c00ee4
    depends_on:
      - tor
      - redis

@@ -162,7 +162,7 @@ services:
        ipv4_address: $MANAGER_IP
  middleware:
    container_name: middleware
    image: ghcr.io/runcitadel/middleware:v0.0.11@sha256:e472da8cbfa67d9a9dbf321334fe65cdf20a0f9b6d6bab33fdf07210f54e7002
    image: ghcr.io/runcitadel/middleware:main@sha256:2fbbfb2e818bf0462f74a6aaab192881615ae018e6dcb62a50d05f82ec622cb0
    depends_on:
      - manager
      - bitcoin

@@ -223,6 +223,7 @@ services:
        ipv4_address: $ELECTRUM_IP
  redis:
    container_name: redis
    user: 1000:1000
    image: redis:7.0.0-bullseye@sha256:ad0705f2e2344c4b642449e658ef4669753d6eb70228d46267685045bf932303
    working_dir: /data
    volumes:

25  events/triggers/quick-update  (Executable file)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

# SPDX-FileCopyrightText: 2021-2022 Citadel and contributors
#
# SPDX-License-Identifier: GPL-3.0-or-later

CITADEL_ROOT="$(readlink -f $(dirname "${BASH_SOURCE[0]}")/../..)"

RELEASE=$(cat "$CITADEL_ROOT"/statuses/update-status.json | jq .updateTo -r)

cat <<EOF > "$CITADEL_ROOT"/statuses/update-status.json
{"state": "installing", "progress": 30, "description": "Starting update", "updateTo": "$RELEASE"}
EOF

curl "https://raw.githubusercontent.com/runcitadel/core/${RELEASE}/db/dependencies.yml" > "$CITADEL_ROOT"/db/dependencies
cat <<EOF > "$CITADEL_ROOT"/statuses/update-status.json
{"state": "installing", "progress": 70, "description": "Starting new containers", "updateTo": "$RELEASE"}
EOF

"${CITADEL_ROOT}/scripts/start"

cat <<EOF > "$CITADEL_ROOT"/statuses/update-status.json
{"state": "success", "progress": 100, "description": "Successfully installed Citadel $RELEASE", "updateTo": ""}
EOF

@@ -7,3 +7,4 @@
CITADEL_ROOT="$(readlink -f $(dirname "${BASH_SOURCE[0]}")/../..)"

"${CITADEL_ROOT}/scripts/set-update-channel" "${1}"
"${CITADEL_ROOT}/scripts/start"

@@ -2,5 +2,6 @@
  "version": "0.0.7",
  "name": "Citadel 0.0.7",
  "requires": ">=0.0.1",
  "isQuickUpdate": false,
  "notes": "While we are busy with the next huge update, you may need to wait longer for updates. This update updates Bitcoin Knots and LND to their latest versions to ensure apps can utilize their latest features. In addition, this update includes the Citadel CLI. More information on that will be published soon."
}

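How the new isQuickUpdate flag is consumed is not shown in this diff, so the following is only a hedged sketch: a release whose info JSON sets "isQuickUpdate": true could be applied by the events/triggers/quick-update trigger above (refresh the pinned dependency list, then restart core services via scripts/start), while false would imply the regular full update path. All paths and file names below are assumptions for illustration.

# Hypothetical dispatcher; NOT part of this commit. Paths are assumptions.
import json
import subprocess

CITADEL_ROOT = "/home/citadel/citadel"  # assumed install location

with open(f"{CITADEL_ROOT}/release-info.json") as f:  # assumed file holding the release JSON above
    release_info = json.load(f)

if release_info.get("isQuickUpdate"):
    # Quick path: only refresh pinned image digests and restart core containers.
    subprocess.run([f"{CITADEL_ROOT}/events/triggers/quick-update"], check=True)
else:
    # Full path: whatever the regular update entry point is on this install (assumed name).
    subprocess.run([f"{CITADEL_ROOT}/scripts/update"], check=True)
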
39  scripts/configure  (vendored)
@@ -31,13 +31,14 @@ if not is_arm64 and not is_amd64:
    print('Citadel only works on arm64 and amd64!')
    exit(1)

dependencies = False

# Check the output of "docker compose version", if it matches "Docker Compose version v2.0.0-rc.3", return true
# Otherwise, return false
def is_compose_rc_or_outdated():
def is_compose_version_except(target_version):
    try:
        output = subprocess.check_output(['docker', 'compose', 'version'])
        if output.decode('utf-8').strip() != 'Docker Compose version v2.3.3':
            print("Using outdated Docker Compose, updating...")
        if output.decode('utf-8').strip() != 'Docker Compose version {}'.format(target_version):
            return True
        else:
            return False

@@ -48,17 +49,19 @@ def is_compose_rc_or_outdated():
def download_docker_compose():
    # Skip if os.path.expanduser('~/.docker/cli-plugins/docker-compose') exists
    subprocess.check_call(["mkdir", "-p", os.path.expanduser('~/.docker/cli-plugins/')])
    if (os.path.exists(os.path.expanduser('~/.docker/cli-plugins/docker-compose')) or os.path.exists('/usr/lib/docker/cli-plugins/docker-compose')) and not is_compose_rc_or_outdated():
        print("Found {}\n".format(subprocess.check_output(['docker', 'compose', 'version']).decode('utf-8').strip()))
        return

    print("Installing Docker Compose...\n")

    if is_arm64:
        subprocess.check_call(['wget', 'https://github.com/docker/compose/releases/download/v2.3.3/docker-compose-linux-aarch64', '-O', os.path.expanduser('~/.docker/cli-plugins/docker-compose')])
        compose_arch = 'aarch64'
    elif is_amd64:
        subprocess.check_call(['wget', 'https://github.com/docker/compose/releases/download/v2.3.3/docker-compose-linux-x86_64', '-O', os.path.expanduser('~/.docker/cli-plugins/docker-compose')])
        os.chmod(os.path.expanduser('~/.docker/cli-plugins/docker-compose'), 0o755)
        compose_arch = 'x86_64'
    # We validate that no other case than the two above can happen before

    if is_compose_version_except(dependencies['compose']):
        print("Docker compose not found or not required version, updating.")
        compose_url = 'https://github.com/docker/compose/releases/download/{}/docker-compose-linux-{}'.format(dependencies['compose'], compose_arch)
        compose_file = os.path.expanduser('~/.docker/cli-plugins/docker-compose')
        subprocess.check_call(['wget', compose_url, '-O', compose_file])
        os.chmod(compose_file, 0o755)


if not shutil.which("wget"):
    print('Wget is not installed!')

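To tie the two changes above together: dependencies['compose'] is read from the new db/dependencies.yml (pinned to v2.6.0 in this diff), so the renamed check compares the installed plugin against that pin instead of a hardcoded version. A small illustrative call, assuming dependencies has already been loaded as shown further down in scripts/configure:

# Illustrative only. With db/dependencies.yml pinning compose: v2.6.0,
# is_compose_version_except('v2.6.0') returns False when `docker compose version`
# prints exactly "Docker Compose version v2.6.0", and True for any other output,
# in which case download_docker_compose() fetches the pinned build for this CPU.
needs_update = is_compose_version_except(dependencies['compose'])
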
@@ -72,6 +75,9 @@ if not shutil.which("docker"):
CITADEL_ROOT=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(CITADEL_ROOT)

with open("./db/dependencies.yml", "r") as file:
    dependencies = yaml.safe_load(file)

updating = False
status_dir = os.path.join(CITADEL_ROOT, 'statuses')
# Make sure to use the main status dir for updates

@@ -365,6 +371,15 @@ print("Generated configuration files\n")
print("Checking if Docker Compose is installed...")
download_docker_compose()

print("Updating core services...")
print()
with open("docker-compose.yml", 'r') as stream:
    compose = yaml.safe_load(stream)
for service in ["manager", "middleware", "dashboard"]:
    compose["services"][service]["image"] = dependencies[service]
with open("docker-compose.yml", "w") as stream:
    yaml.dump(compose, stream, sort_keys=False)

if not reconfiguring:
    print("Updating apps...\n")
    os.system('./scripts/app --invoked-by-configure update')

@@ -10,7 +10,7 @@ NODE_ROOT="$(readlink -f $(dirname "${BASH_SOURCE[0]}")/..)"
# If $1 is not given, fail
if [ -z "$1" ]; then
  echo "Usage: $0 <channel>"
  echo "Channel can currently either be 'stable' or 'beta'"
  echo "Channel can currently either be 'stable', 'beta' or 'c-lightning'"
  exit 1
fi
sed -i "s/UPDATE_CHANNEL=.*/UPDATE_CHANNEL=${1}/" "${NODE_ROOT}/.env"