citadel-core/scripts/configure

#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2021-2022 Citadel and contributors
#
# SPDX-License-Identifier: GPL-3.0-or-later
import sys
import os
from lib.rpcauth import get_data
import re
import subprocess
import json
import shutil
import yaml
# Print an error if the user isn't running on Linux.
if sys.platform != 'linux':
    print('This script only works on Linux!')
    exit(1)
# Print an error if user is not root
if os.getuid() != 0:
    print('This script must be run as root!')
    exit(1)
# Check if the system is arm64 or amd64
is_arm64 = subprocess.check_output(['uname', '-m']).decode('utf-8').strip() == 'aarch64'
is_amd64 = subprocess.check_output(['uname', '-m']).decode('utf-8').strip() == 'x86_64'
if not is_arm64 and not is_amd64:
    print('Citadel only works on arm64 and amd64!')
    exit(1)
dependencies = False
# Check the output of "docker compose version" against the pinned Compose version.
# Returns True if Docker Compose is missing or its version differs from target_version,
# False if the installed version matches.
def is_compose_version_except(target_version):
    try:
        output = subprocess.check_output(['docker', 'compose', 'version'])
        if output.decode('utf-8').strip() != 'Docker Compose version {}'.format(target_version):
            return True
        else:
            return False
    except:
        return True
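# Docker discovers CLI plugins such as "docker compose" in ~/.docker/cli-plugins/,
# which is why the Compose v2 binary is installed there below.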
# Download docker-compose from GitHub and put it in $HOME/.docker/cli-plugins/docker-compose
def download_docker_compose():
    # Make sure the Docker CLI plugin directory exists
    subprocess.check_call(["mkdir", "-p", os.path.expanduser('~/.docker/cli-plugins/')])
    if is_arm64:
        compose_arch = 'aarch64'
    elif is_amd64:
        compose_arch = 'x86_64'
    # We validate earlier that no case other than these two architectures can occur
    if is_compose_version_except(dependencies['compose']):
        print("Docker Compose not found or not the required version, updating.")
        compose_url = 'https://github.com/docker/compose/releases/download/{}/docker-compose-linux-{}'.format(dependencies['compose'], compose_arch)
        compose_file = os.path.expanduser('~/.docker/cli-plugins/docker-compose')
        subprocess.check_call(['wget', compose_url, '-O', compose_file])
        os.chmod(compose_file, 0o755)
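    # Hypothetical example: with dependencies['compose'] pinned to "v2.12.2" on an
    # amd64 machine, the URL built above would resolve to
    # https://github.com/docker/compose/releases/download/v2.12.2/docker-compose-linux-x86_64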
if not shutil.which("wget"):
    print('Wget is not installed!')
    exit(1)
if not shutil.which("docker"):
    print('Docker is not installed!')
    exit(1)
# Switch to node root directory.
CITADEL_ROOT=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(CITADEL_ROOT)
with open("./db/dependencies.yml", "r") as file:
    dependencies = yaml.safe_load(file)
# Configure for appropriate network depending
# upon the user-supplied value of $NETWORK
# If the network is not specified, then use the mainnet
BITCOIN_NETWORK=os.environ.get('NETWORK') or 'mainnet'
# Check that the network is one of mainnet, testnet, signet or regtest
if BITCOIN_NETWORK not in ['mainnet', 'testnet', 'signet', 'regtest']:
    print('Error: Network must be either mainnet, testnet, signet or regtest!')
    exit(1)
with open(os.path.join(CITADEL_ROOT, "info.json"), 'r') as file:
    CITADEL_VERSION=json.load(file)['version']
status_dir = os.path.join(CITADEL_ROOT, 'statuses')
print("\n======================================")
if os.path.isfile(status_dir+'/configured'):
    print("=========== RECONFIGURING ============")
    reconfiguring=os.path.isfile('./.env')
else:
    print("============ CONFIGURING =============")
    reconfiguring=False
print("============== CITADEL ==============")
print("======================================\n")
print("Installing additional services")
data = subprocess.run("\"{}\" setup".format(os.path.join(CITADEL_ROOT, "services", "manage.py")), shell=True)
# Parse a dotenv file
# Values can either be KEY=VALUE or KEY="VALUE" or KEY='VALUE'
# Returns all env vars as a dict
def parse_dotenv(file_path):
    envVars: dict = {}
    with open(file_path, 'r') as file:
        for line in file:
            line = line.strip()
            if line.startswith('#') or len(line) == 0:
                continue
            if '=' in line:
                key, value = line.split('=', 1)
                value = value.strip('"').strip("'")
                envVars[key] = value
            else:
                print("Error: Invalid line in {}: {}".format(file_path, line))
                print("Line should be in the format KEY=VALUE or KEY=\"VALUE\" or KEY='VALUE'")
                exit(1)
    return envVars
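    # For example, a (hypothetical) .env containing
    #   BITCOIN_NETWORK="mainnet"
    #   NGINX_PORT=80
    # is parsed into {'BITCOIN_NETWORK': 'mainnet', 'NGINX_PORT': '80'}; all values
    # come back as strings with surrounding quotes stripped.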
##########################################################
############ Generate configuration variables ############
##########################################################
NGINX_PORT=os.environ.get('NGINX_PORT') or "80"
NGINX_SSL_PORT=os.environ.get('NGINX_SSL_PORT') or "443"
UPDATE_CHANNEL="stable"
if reconfiguring:
    dotenv=parse_dotenv('./.env')
    BITCOIN_NETWORK=os.environ.get('OVERWRITE_NETWORK') or dotenv['BITCOIN_NETWORK']
    # Check that the network is one of mainnet, testnet, signet or regtest
    if BITCOIN_NETWORK not in ['mainnet', 'testnet', 'signet', 'regtest']:
        print('Error: Network must be either mainnet, testnet, signet or regtest!')
        exit(1)
    print("Using {} network\n".format(BITCOIN_NETWORK))
    BITCOIN_RPC_PORT=dotenv['BITCOIN_RPC_PORT']
    BITCOIN_P2P_PORT=dotenv['BITCOIN_P2P_PORT']
    BITCOIN_RPC_USER=dotenv['BITCOIN_RPC_USER']
    BITCOIN_RPC_PASS=dotenv['BITCOIN_RPC_PASS']
    BITCOIN_RPC_AUTH=dotenv['BITCOIN_RPC_AUTH']
    TOR_PASSWORD=dotenv['TOR_PASSWORD']
    TOR_HASHED_PASSWORD=dotenv['TOR_HASHED_PASSWORD']
    NGINX_PORT=dotenv['NGINX_PORT']
    NGINX_SSL_PORT="443"
    if 'NGINX_SSL_PORT' in dotenv:
        NGINX_SSL_PORT=dotenv['NGINX_SSL_PORT']
    if NGINX_SSL_PORT == "80" and NGINX_PORT == "80":
        NGINX_SSL_PORT="443"
    if 'UPDATE_CHANNEL' in dotenv and dotenv['UPDATE_CHANNEL'] != "main" and dotenv['UPDATE_CHANNEL'] != "migration":
        UPDATE_CHANNEL=dotenv['UPDATE_CHANNEL']
else:
    # Generate RPC credentials
    print("Generating auth credentials\n")
    BITCOIN_RPC_USER="citadel"
    BITCOIN_RPC_DETAILS=get_data(BITCOIN_RPC_USER)
    BITCOIN_RPC_AUTH=BITCOIN_RPC_DETAILS['auth']
    BITCOIN_RPC_PASS=BITCOIN_RPC_DETAILS['password']
    # Pull Tor image and generate Tor password
    print("Generating Tor password\n")
    os.system('docker pull --quiet {}'.format(dependencies["tor"]))
    TOR_PASSWORD=get_data('itdoesntmatter')['password']
    TOR_HASHED_PASSWORD=os.popen('docker run --rm {} --quiet --hash-password "{}"'.format(dependencies["tor"], TOR_PASSWORD)).read()[:-1]
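# Note: lib/rpcauth returns both a plaintext RPC password (handed to clients such as
# LND and the middleware via the templates below) and a salted-HMAC "auth" string,
# which is presumably what ends up in bitcoind's rpcauth= option. Likewise, the hashed
# Tor password is meant for Tor's HashedControlPassword option, while the plaintext
# TOR_PASSWORD can be handed to services that authenticate to the control port.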
BITCOIN_NODE="neutrino"
ALIAS_AND_COLOR=""
ADDITIONAL_BITCOIN_OPTIONS=""
BOLT_DB_OPTIONS=""
CHANNEL_LIMITATIONS=""
BASEFEE = "bitcoin.basefee=0"
EXTERNAL_IP = ""
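# If a Tor hidden service for the Bitcoin P2P port was already generated
# (./tor/data/bitcoin-p2p/hostname), advertise its .onion address to peers via
# bitcoind's externalip= option.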
if os.path.isfile('./tor/data/bitcoin-p2p/hostname'):
    EXTERNAL_IP="externalip=" + open('./tor/data/bitcoin-p2p/hostname').read()
if os.path.isfile("./lnd/lnd.conf"):
    with open("./lnd/lnd.conf", 'r') as file:
        # We generally don't want to allow changing lnd.conf, but we keep as many custom settings as possible
        for line in file:
            if line.startswith("bitcoin.node="):
                BITCOIN_NODE = line.split("=")[1].strip()
            if line.startswith("alias="):
                ALIAS_AND_COLOR += "\n" + line.strip()
            if line.startswith("color="):
                ALIAS_AND_COLOR += "\n" + line.strip()
            if line.startswith("bitcoin.basefee"):
                BASEFEE = line.strip()
            if line.startswith("bitcoin.feerate"):
                ADDITIONAL_BITCOIN_OPTIONS += "\n" + line.strip()
            if line.startswith("minchansize"):
                CHANNEL_LIMITATIONS += "\n" + line.strip()
            if line.startswith("maxchansize"):
                CHANNEL_LIMITATIONS += "\n" + line.strip()
            if line.startswith("maxpendingchannels"):
                CHANNEL_LIMITATIONS += "\n" + line.strip()
            if line.startswith("db.bolt.auto-compact"):
                BOLT_DB_OPTIONS += "\n" + line.strip()
if BOLT_DB_OPTIONS != "":
    BOLT_DB_OPTIONS = "[bolt]\n" + BOLT_DB_OPTIONS
if CHANNEL_LIMITATIONS == "":
    CHANNEL_LIMITATIONS = "maxpendingchannels=3\nminchansize=10000"
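# Hypothetical example: an existing lnd.conf containing "alias=MyCitadel" and
# "color=#8849ce" carries those two lines over via ALIAS_AND_COLOR, while a config
# with no channel settings falls back to the defaults above
# (maxpendingchannels=3, minchansize=10000).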
NEUTRINO_PEERS=""
if BITCOIN_NETWORK == "mainnet":
    BITCOIN_RPC_PORT=8332
    BITCOIN_P2P_PORT=8333
elif BITCOIN_NETWORK == "testnet":
    BITCOIN_RPC_PORT=18332
    BITCOIN_P2P_PORT=18333
    NEUTRINO_PEERS='''
[neutrino]
neutrino.addpeer=testnet1-btcd.zaphq.io
neutrino.addpeer=testnet2-btcd.zaphq.io
'''
elif BITCOIN_NETWORK == "signet":
    BITCOIN_RPC_PORT=38332
    BITCOIN_P2P_PORT=38333
    BITCOIN_NODE="bitcoind"
elif BITCOIN_NETWORK == "regtest":
    BITCOIN_RPC_PORT=18334
    BITCOIN_P2P_PORT=18335
    BITCOIN_NODE="bitcoind"
else:
    exit(1)
NETWORK_SECTION=""
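# Bitcoin Core names its testnet config section [test] rather than [testnet], so
# testnet is special-cased below; signet and regtest sections match the network name.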
if BITCOIN_NETWORK != "mainnet":
    NETWORK_SECTION = "[{}]".format(BITCOIN_NETWORK)
if BITCOIN_NETWORK == "testnet":
    NETWORK_SECTION = "[test]"
# IP addresses for services
NETWORK_IP="10.21.21.0"
GATEWAY_IP="10.21.21.1"
NGINX_IP="10.21.21.2"
DASHBOARD_IP="10.21.21.3"
MANAGER_IP="10.21.21.4"
MIDDLEWARE_IP="10.21.21.5"
NEUTRINO_SWITCHER_IP="10.21.21.6"
DASHBOARD_NEW_IP="10.21.21.7"
BITCOIN_IP="10.21.21.8"
LND_IP="10.21.21.9"
TOR_PROXY_IP="10.21.21.11"
APPS_TOR_IP="10.21.21.12"
APPS_2_TOR_IP="10.21.21.13"
APPS_3_TOR_IP="10.21.21.14"
REDIS_IP="10.21.21.15"
# Ports
BITCOIN_RPC_PORT="8332"
BITCOIN_P2P_PORT="8333"
BITCOIN_ZMQ_RAWBLOCK_PORT="28332"
BITCOIN_ZMQ_RAWTX_PORT="28333"
BITCOIN_ZMQ_HASHBLOCK_PORT="28334"
BITCOIN_ZMQ_SEQUENCE_PORT="28335"
LND_GRPC_PORT="10009"
LND_REST_PORT="8080"
TOR_PROXY_PORT="9050"
TOR_CONTROL_PORT="29051"
DEVICE_HOSTNAME=""
try:
    DEVICE_HOSTNAME=subprocess.check_output("hostname").decode("utf-8").strip()
except:
    # The content of /etc/hostname is the device's hostname
    DEVICE_HOSTNAME=open("/etc/hostname").read().strip()
DOCKER_EXECUTABLE=subprocess.check_output(["which", "docker"]).decode("utf-8").strip()
# Get the real path by following symlinks
DOCKER_BINARY=subprocess.check_output(["readlink", "-f", DOCKER_EXECUTABLE]).decode("utf-8").strip()
# Set LND fee URL for neutrino on mainnet
LND_FEE_URL=""
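# Neutrino has no local mempool for LND to estimate fees from, so it is pointed at an
# external fee estimation endpoint instead.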
if BITCOIN_NETWORK == 'mainnet' and BITCOIN_NODE == 'neutrino':
    LND_FEE_URL="feeurl=https://nodes.lightning.computer/fees/v1/btc-fee-estimates.json"
# Look up a variable by name: first in this function's locals, then in the locals
# passed in by the caller, then in the module globals. If it exists nowhere, exit
# with an error naming the template file that referenced it.
def get_var(var_name, other_locals, file_name):
    if var_name in locals():
        return str(locals()[var_name])
    elif var_name in other_locals:
        return str(other_locals[var_name])
    elif var_name in globals():
        return str(globals()[var_name])
    else:
        print("Error: {} is not defined! (In file {})".format(var_name, file_name))
        exit(1)
# Converts a string to uppercase, also replaces all - with _
def convert_to_upper(string):
    return string.upper().replace('-', '_')
# Put variables into the config file. A config file accesses an env var $EXAMPLE_VARIABLE
# by containing <example-variable>. Check for such occurrences and replace them with
# the actual variable's value.
def replace_vars(file_path):
    with open(file_path, 'r') as file:
        file_contents = file.read()
    return re.sub(r'<(.*?)>', lambda m: get_var(convert_to_upper(m.group(1)), locals(), file_path), file_contents)
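# For example, a (hypothetical) template line such as
#   rpcuser=<bitcoin-rpc-user>
# is rewritten to rpcuser=citadel (the current value of BITCOIN_RPC_USER) when the
# templates are rendered below.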
templates_to_build = {
    "./templates/torrc-empty": ["./tor/torrc-apps", "./tor/torrc-apps-2", "./tor/torrc-apps-3"],
    "./templates/torrc-core-sample": "./tor/torrc-core",
    "./templates/lnd-sample.conf": "./lnd/lnd.conf",
    "./templates/bitcoin-sample.conf": "./bitcoin/bitcoin.conf",
    "./templates/.env-sample": "./.env",
    "./templates/nginx-sample.conf": "./nginx/nginx.conf"
}
print("Generating configuration files...")
# Loop through templates_to_build and build them
for template_path, output_path in templates_to_build.items():
    # Don't regenerate nginx.conf if it already exists
    if output_path == "./nginx/nginx.conf" and os.path.isfile(output_path):
        continue
    data = replace_vars(template_path)
    # A list means the same rendered template is written to multiple output paths
    if isinstance(output_path, list):
        for output_path_item in output_path:
            # If the output path exists as a directory, remove it so a file can be written
            if os.path.isdir(output_path_item):
                shutil.rmtree(output_path_item)
            with open(output_path_item, 'w') as file:
                file.write(data)
    else:
        # If the output path exists as a directory, remove it so a file can be written
        if os.path.isdir(output_path):
            shutil.rmtree(output_path)
        with open(output_path, 'w') as file:
            file.write(data)
print("Generated configuration files\n")
print("Checking if Docker Compose is installed...")
download_docker_compose()
print("Updating core services...")
print()
with open("docker-compose.yml", 'r') as stream:
    compose = yaml.safe_load(stream)
for service in ["manager", "middleware", "dashboard"]:
    compose["services"][service]["image"] = dependencies[service]
for service in ["tor", "app-tor", "app-2-tor", "app-3-tor"]:
    compose["services"][service]["image"] = dependencies["tor"]
with open("docker-compose.yml", "w") as stream:
    yaml.dump(compose, stream, sort_keys=False)
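# The concrete image references written above are not hard-coded here; they come from
# db/dependencies.yml (hypothetical example entry: "manager: some-registry/manager:v1.2.3"),
# so bumping that file and re-running configure repins the core services in
# docker-compose.yml.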
if not reconfiguring:
    print("Updating apps...\n")
    os.system('./scripts/app --invoked-by-configure update')
else:
    print("Updating apps...\n")
    os.system('./scripts/app --invoked-by-configure generate')
print("Configuring permissions...\n")
os.system('chown -R 1000:1000 {}'.format(CITADEL_ROOT))
# Touch status_dir/configured
with open(status_dir+'/configured', 'w') as file:
    file.write('')
print("Configuration successful\n")
print("You can now start Citadel by running:")
print(" sudo ./scripts/start")