Compare commits


No commits in common. "master" and "autobackup-errors" have entirely different histories.

189 changed files with 10274 additions and 7579 deletions


@ -5,11 +5,18 @@ name: default
steps:
- name: Run Tests and Generate Coverage Report
commands:
- nix flake check -L
- kill $(ps aux | grep 'redis-server 127.0.0.1:6389' | awk '{print $2}') || true
- redis-server --bind 127.0.0.1 --port 6389 >/dev/null &
# We do not care about persistence on CI
- sleep 10
- redis-cli -h 127.0.0.1 -p 6389 config set stop-writes-on-bgsave-error no
- coverage run -m pytest -q
- coverage xml
- sonar-scanner -Dsonar.projectKey=SelfPrivacy-REST-API -Dsonar.sources=. -Dsonar.host.url=http://analyzer.lan:9000 -Dsonar.login="$SONARQUBE_TOKEN"
environment:
SONARQUBE_TOKEN:
from_secret: SONARQUBE_TOKEN
USE_REDIS_PORT: 6389
- name: Run Bandit Checks
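For reference, the test suite reads the Redis port from the `USE_REDIS_PORT` environment variable exported above. A minimal sketch of how a test helper might connect, assuming the `redis` (redis-py) Python client is available and falling back to the default port when the variable is unset:

```python
import os

import redis  # redis-py client, assumed to be available in the test environment

# The CI pipeline above exports USE_REDIS_PORT=6389 for the throwaway redis-server;
# fall back to Redis' default port 6379 when the variable is not set.
port = int(os.environ.get("USE_REDIS_PORT", "6379"))
client = redis.Redis(host="127.0.0.1", port=port, decode_responses=True)
client.ping()  # raises redis.exceptions.ConnectionError if the server is not reachable
```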

.gitignore (Normal file → Executable file)

@ -148,6 +148,3 @@ cython_debug/
*.db
*.rdb
/result
/.nixos-test-history


@ -1,2 +0,0 @@
[mypy]
plugins = pydantic.mypy


@ -1,92 +0,0 @@
# SelfPrivacy GraphQL API which allows the app to control your server
![CI status](https://ci.selfprivacy.org/api/badges/SelfPrivacy/selfprivacy-rest-api/status.svg)
## Build
```console
$ nix build
```
On a successful build, you should get a `./result` symlink to a folder (in `/nix/store`) with the build contents.
## Develop
```console
$ nix develop
[SP devshell:/dir/selfprivacy-rest-api]$ python
Python 3.10.13 (main, Aug 24 2023, 12:59:26) [GCC 12.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
(ins)>>>
```
If you don't have experimental flakes enabled, you can use the following command:
```console
$ nix --extra-experimental-features nix-command --extra-experimental-features flakes develop
```
## Testing
Run the test suite (coverage with pytest) inside an ephemeral NixOS VM with the Redis service enabled:
```console
$ nix flake check -L
```
Run the same test suite, but additionally create `./result/coverage.xml` in the current directory:
```console
$ nix build .#checks.x86_64-linux.default -L
```
Alternatively, just print the path to `/nix/store/...coverage.xml` without creating any files in the current directory:
```console
$ nix build .#checks.x86_64-linux.default -L --print-out-paths --no-link
```
Run the same test suite with arbitrary pytest options:
```console
$ pytest-vm.sh # specify pytest options here, e.g. `--last-failed`
```
When running via the script, the pytest cache is preserved between runs in the `.pytest_cache` folder.
The NixOS VM state temporarily resides in `${TMPDIR:=/tmp}/nixos-vm-tmp-dir/vm-state-machine` during the test.
The Git working directory is shared read-write with the VM via the `.nixos-vm-tmp-dir/shared-xchg` symlink. The VM accesses it via the `/tmp/shared` mount point and the `/root/source` symlink.
Launch the VM and execute commands manually, either in the Linux console (user `root`) or via the Python NixOS test driver API (refer to the [NixOS documentation](https://nixos.org/manual/nixos/stable/#ssec-machine-objects)):
```console
$ nix run .#checks.x86_64-linux.default.driverInteractive
```
Add `--keep-vm-state` to keep the VM state between runs:
```console
$ TMPDIR=".nixos-vm-tmp-dir" nix run .#checks.x86_64-linux.default.driverInteractive --keep-vm-state
```
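Inside the interactive driver you talk to the VM through the Python test API mentioned above. A minimal sketch, assuming the VM is named `machine` as in the flake's test definition:

```python
# Typed at the interactive nixos-test-driver (Python) prompt.
start_all()                                  # boot the VM(s)
machine.wait_for_unit("multi-user.target")   # wait until the system is fully up
print(machine.succeed("ls /root/source"))    # the project checkout shared into the VM
machine.shutdown()
```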
The `-L`/`--print-build-logs` option is optional for all nix commands; it tells nix to print each log line one after another instead of overwriting a single one.
## Dependencies and Dependent Modules
This flake depends on a single Nix flake input, the nixpkgs repository, which provides all the software packages used to build and run the API service, the tests, etc.
To synchronize the nixpkgs input with the one from the selfprivacy-nixos-config repository, use this command:
```console
$ nix flake lock --override-input nixpkgs nixpkgs --inputs-from git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=BRANCH
```
Replace BRANCH with the branch of the selfprivacy-nixos-config repository you want to sync with. During development, the nixpkgs input might need to be updated in both the selfprivacy-rest-api and selfprivacy-nixos-config repositories simultaneously, so a feature branch may be used temporarily until selfprivacy-nixos-config merges it.
Show current flake inputs (e.g. nixpkgs):
```console
$ nix flake metadata
```
Show selfprivacy-nixos-config Nix flake inputs (including nixpkgs):
```console
$ nix flake metadata git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=BRANCH
```
The Nix code for the API's NixOS service module is located in the NixOS configuration repository.
## Troubleshooting
Sometimes commands inside `nix develop` refuse to work properly if the calling shell lacks the `LANG` environment variable. Set it before entering `nix develop`.
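For example, assuming a UTF-8 locale is available on the host:
```console
$ export LANG=C.UTF-8
$ nix develop
```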


@ -1,29 +0,0 @@
{ pythonPackages, rev ? "local" }:
pythonPackages.buildPythonPackage rec {
pname = "selfprivacy-graphql-api";
version = rev;
src = builtins.filterSource (p: t: p != ".git" && t != "symlink") ./.;
propagatedBuildInputs = with pythonPackages; [
fastapi
gevent
huey
mnemonic
portalocker
psutil
pydantic
pytz
redis
setuptools
strawberry-graphql
typing-extensions
uvicorn
];
pythonImportsCheck = [ "selfprivacy_api" ];
doCheck = false;
meta = {
description = ''
SelfPrivacy Server Management API
'';
};
}


@ -1,26 +0,0 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1709677081,
"narHash": "sha256-tix36Y7u0rkn6mTm0lA45b45oab2cFLqAzDbJxeXS+c=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "880992dcc006a5e00dd0591446fdf723e6a51a64",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

flake.nix

@ -1,162 +0,0 @@
{
description = "SelfPrivacy API flake";
inputs.nixpkgs.url = "github:nixos/nixpkgs";
outputs = { self, nixpkgs, ... }:
let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
selfprivacy-graphql-api = pkgs.callPackage ./default.nix {
pythonPackages = pkgs.python310Packages;
rev = self.shortRev or self.dirtyShortRev or "dirty";
};
python = self.packages.${system}.default.pythonModule;
python-env =
python.withPackages (ps:
self.packages.${system}.default.propagatedBuildInputs ++ (with ps; [
coverage
pytest
pytest-datadir
pytest-mock
pytest-subprocess
black
mypy
pylsp-mypy
python-lsp-black
python-lsp-server
pyflakes
typer # for strawberry
types-redis # for mypy
] ++ strawberry-graphql.optional-dependencies.cli));
vmtest-src-dir = "/root/source";
shellMOTD = ''
Welcome to SP API development shell!
[formatters]
black
nixpkgs-fmt
[testing in NixOS VM]
nixos-test-driver - run an interactive NixOS VM with all dependencies included and 2 disk volumes
pytest-vm - run pytest in an ephemeral NixOS VM with Redis, accepting pytest arguments
'';
in
{
# see https://github.com/NixOS/nixpkgs/blob/66a9817cec77098cfdcbb9ad82dbb92651987a84/nixos/lib/test-driver/test_driver/machine.py#L359
packages.${system} = {
default = selfprivacy-graphql-api;
pytest-vm = pkgs.writeShellScriptBin "pytest-vm" ''
set -o errexit
set -o nounset
set -o xtrace
# see https://github.com/NixOS/nixpkgs/blob/66a9817cec77098cfdcbb9ad82dbb92651987a84/nixos/lib/test-driver/test_driver/machine.py#L359
export TMPDIR=''${TMPDIR:=/tmp}/nixos-vm-tmp-dir
readonly NIXOS_VM_SHARED_DIR_HOST="$TMPDIR/shared-xchg"
readonly NIXOS_VM_SHARED_DIR_GUEST="/tmp/shared"
mkdir -p "$TMPDIR"
ln -sfv "$PWD" -T "$NIXOS_VM_SHARED_DIR_HOST"
SCRIPT=$(cat <<EOF
start_all()
machine.succeed("ln -sf $NIXOS_VM_SHARED_DIR_GUEST -T ${vmtest-src-dir} >&2")
machine.succeed("cd ${vmtest-src-dir} && coverage run -m pytest -v $@ >&2")
machine.succeed("cd ${vmtest-src-dir} && coverage report >&2")
EOF
)
if [ -f "/etc/arch-release" ]; then
${self.checks.${system}.default.driverInteractive}/bin/nixos-test-driver --no-interactive <(printf "%s" "$SCRIPT")
else
${self.checks.${system}.default.driver}/bin/nixos-test-driver -- <(printf "%s" "$SCRIPT")
fi
'';
};
nixosModules.default =
import ./nixos/module.nix self.packages.${system}.default;
devShells.${system}.default = pkgs.mkShellNoCC {
name = "SP API dev shell";
packages = with pkgs; [
nixpkgs-fmt
rclone
redis
restic
self.packages.${system}.pytest-vm
# FIXME consider loading this explicitly only after ArchLinux issue is solved
self.checks.x86_64-linux.default.driverInteractive
# the target API application python environment
python-env
];
shellHook = ''
# envs set with export and as attributes are treated differently.
# for example, printenv <Name> will not fetch the value of an attribute.
export TEST_MODE="true"
# more tips for bash-completion to work on non-NixOS:
# https://discourse.nixos.org/t/whats-the-nix-way-of-bash-completion-for-packages/20209/16?u=alexoundos
# Load installed profiles
for file in "/etc/profile.d/"*.sh; do
# If that folder doesn't exist, bash loves to return the whole glob
[[ -f "$file" ]] && source "$file"
done
printf "%s" "${shellMOTD}"
'';
};
checks.${system} = {
fmt-check = pkgs.runCommandLocal "sp-api-fmt-check"
{ nativeBuildInputs = [ pkgs.black ]; }
"black --check ${self.outPath} > $out";
default =
pkgs.testers.runNixOSTest {
name = "default";
nodes.machine = { lib, pkgs, ... }: {
# 2 additional disks (1024 MiB and 200 MiB) with empty ext4 FS
virtualisation.emptyDiskImages = [ 1024 200 ];
virtualisation.fileSystems."/volumes/vdb" = {
autoFormat = true;
device = "/dev/vdb"; # this name is chosen by QEMU, not here
fsType = "ext4";
noCheck = true;
};
virtualisation.fileSystems."/volumes/vdc" = {
autoFormat = true;
device = "/dev/vdc"; # this name is chosen by QEMU, not here
fsType = "ext4";
noCheck = true;
};
boot.consoleLogLevel = lib.mkForce 3;
documentation.enable = false;
services.journald.extraConfig = lib.mkForce "";
services.redis.servers.sp-api = {
enable = true;
save = [ ];
settings.notify-keyspace-events = "KEA";
};
environment.systemPackages = with pkgs; [
python-env
# TODO: these can be passed via wrapper script around app
rclone
restic
];
environment.variables.TEST_MODE = "true";
systemd.tmpfiles.settings.src.${vmtest-src-dir}.L.argument =
self.outPath;
};
testScript = ''
start_all()
machine.succeed("cd ${vmtest-src-dir} && coverage run --data-file=/tmp/.coverage -m pytest -p no:cacheprovider -v >&2")
machine.succeed("coverage xml --rcfile=${vmtest-src-dir}/.coveragerc --data-file=/tmp/.coverage >&2")
machine.copy_from_vm("coverage.xml", ".")
machine.succeed("coverage report >&2")
'';
};
};
};
nixConfig.bash-prompt = ''\n\[\e[1;32m\][\[\e[0m\]\[\e[1;34m\]SP devshell\[\e[0m\]\[\e[1;32m\]:\w]\$\[\[\e[0m\] '';
}


@ -1,22 +0,0 @@
@startuml
left to right direction
title repositories and flake inputs relations diagram
cloud nixpkgs as nixpkgs_transit
control "<font:monospaced><size:15>nixos-rebuild" as nixos_rebuild
component "SelfPrivacy\nAPI app" as selfprivacy_app
component "SelfPrivacy\nNixOS configuration" as nixos_configuration
note top of nixos_configuration : SelfPrivacy\nAPI service module
nixos_configuration ).. nixpkgs_transit
nixpkgs_transit ..> selfprivacy_app
selfprivacy_app --> nixos_configuration
[nixpkgs] --> nixos_configuration
nixos_configuration -> nixos_rebuild
footer %date("yyyy-MM-dd'T'HH:mmZ")
@enduml


@ -1,166 +0,0 @@
selfprivacy-graphql-api: { config, lib, pkgs, ... }:
let
cfg = config.services.selfprivacy-api;
config-id = "default";
nixos-rebuild = "${config.system.build.nixos-rebuild}/bin/nixos-rebuild";
nix = "${config.nix.package.out}/bin/nix";
in
{
options.services.selfprivacy-api = {
enable = lib.mkOption {
default = true;
type = lib.types.bool;
description = ''
Enable SelfPrivacy API service
'';
};
};
config = lib.mkIf cfg.enable {
users.users."selfprivacy-api" = {
isNormalUser = false;
isSystemUser = true;
extraGroups = [ "opendkim" ];
group = "selfprivacy-api";
};
users.groups."selfprivacy-api".members = [ "selfprivacy-api" ];
systemd.services.selfprivacy-api = {
description = "API Server used to control system from the mobile application";
environment = config.nix.envVars // {
HOME = "/root";
PYTHONUNBUFFERED = "1";
} // config.networking.proxy.envVars;
path = [
"/var/"
"/var/dkim/"
pkgs.coreutils
pkgs.gnutar
pkgs.xz.bin
pkgs.gzip
pkgs.gitMinimal
config.nix.package.out
pkgs.restic
pkgs.mkpasswd
pkgs.util-linux
pkgs.e2fsprogs
pkgs.iproute2
];
after = [ "network-online.target" ];
wantedBy = [ "network-online.target" ];
serviceConfig = {
User = "root";
ExecStart = "${selfprivacy-graphql-api}/bin/app.py";
Restart = "always";
RestartSec = "5";
};
};
systemd.services.selfprivacy-api-worker = {
description = "Task worker for SelfPrivacy API";
environment = config.nix.envVars // {
HOME = "/root";
PYTHONUNBUFFERED = "1";
PYTHONPATH =
pkgs.python310Packages.makePythonPath [ selfprivacy-graphql-api ];
} // config.networking.proxy.envVars;
path = [
"/var/"
"/var/dkim/"
pkgs.coreutils
pkgs.gnutar
pkgs.xz.bin
pkgs.gzip
pkgs.gitMinimal
config.nix.package.out
pkgs.restic
pkgs.mkpasswd
pkgs.util-linux
pkgs.e2fsprogs
pkgs.iproute2
];
after = [ "network-online.target" ];
wantedBy = [ "network-online.target" ];
serviceConfig = {
User = "root";
ExecStart = "${pkgs.python310Packages.huey}/bin/huey_consumer.py selfprivacy_api.task_registry.huey";
Restart = "always";
RestartSec = "5";
};
};
# One shot systemd service to rebuild NixOS using nixos-rebuild
systemd.services.sp-nixos-rebuild = {
description = "nixos-rebuild switch";
environment = config.nix.envVars // {
HOME = "/root";
} // config.networking.proxy.envVars;
# TODO figure out how to get dependencies list reliably
path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
# TODO set proper timeout for reboot instead of service restart
serviceConfig = {
User = "root";
WorkingDirectory = "/etc/nixos";
# sync top-level flake with sp-modules sub-flake
# (https://github.com/NixOS/nix/issues/9339)
ExecStartPre = ''
${nix} flake lock --override-input sp-modules path:./sp-modules
'';
ExecStart = ''
${nixos-rebuild} switch --flake .#${config-id}
'';
KillMode = "none";
SendSIGKILL = "no";
};
restartIfChanged = false;
unitConfig.X-StopOnRemoval = false;
};
# One shot systemd service to upgrade NixOS using nixos-rebuild
systemd.services.sp-nixos-upgrade = {
# protection against simultaneous runs
after = [ "sp-nixos-rebuild.service" ];
description = "Upgrade NixOS and SP modules to latest versions";
environment = config.nix.envVars // {
HOME = "/root";
} // config.networking.proxy.envVars;
# TODO figure out how to get dependencies list reliably
path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
serviceConfig = {
User = "root";
WorkingDirectory = "/etc/nixos";
# TODO get URL from systemd template parameter?
ExecStartPre = ''
${nix} flake update \
--override-input selfprivacy-nixos-config git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=flakes
'';
ExecStart = ''
${nixos-rebuild} switch --flake .#${config-id}
'';
KillMode = "none";
SendSIGKILL = "no";
};
restartIfChanged = false;
unitConfig.X-StopOnRemoval = false;
};
# One shot systemd service to rollback NixOS using nixos-rebuild
systemd.services.sp-nixos-rollback = {
# protection against simultaneous runs
after = [ "sp-nixos-rebuild.service" "sp-nixos-upgrade.service" ];
description = "Rollback NixOS using nixos-rebuild";
environment = config.nix.envVars // {
HOME = "/root";
} // config.networking.proxy.envVars;
# TODO figure out how to get dependencies list reliably
path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
serviceConfig = {
User = "root";
WorkingDirectory = "/etc/nixos";
ExecStart = ''
${nixos-rebuild} switch --rollback --flake .#${config-id}
'';
KillMode = "none";
SendSIGKILL = "no";
};
restartIfChanged = false;
unitConfig.X-StopOnRemoval = false;
};
};
}


@ -1,34 +0,0 @@
from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.jobs import Jobs, Job
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.services.tasks import move_service as move_service_task
class ServiceNotFoundError(Exception):
pass
class VolumeNotFoundError(Exception):
pass
def move_service(service_id: str, volume_name: str) -> Job:
service = get_service_by_id(service_id)
if service is None:
raise ServiceNotFoundError(f"No such service:{service_id}")
volume = BlockDevices().get_block_device(volume_name)
if volume is None:
raise VolumeNotFoundError(f"No such volume:{volume_name}")
service.assert_can_move(volume)
job = Jobs.add(
type_id=f"services.{service.get_id()}.move",
name=f"Move {service.get_display_name()}",
description=f"Moving {service.get_display_name()} data to {volume.name}",
)
move_service_task(service, volume, job)
return job
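A usage sketch of this action follows; the service and volume identifiers are purely illustrative, and the printed `name` field mirrors the `Jobs.add()` arguments above:

```python
from selfprivacy_api.actions.services import (
    ServiceNotFoundError,
    VolumeNotFoundError,
    move_service,
)

try:
    # "nextcloud" and "sdb" are hypothetical example identifiers
    job = move_service("nextcloud", "sdb")
    print(f"Started job: {job.name}")
except (ServiceNotFoundError, VolumeNotFoundError) as error:
    print(f"Cannot move service: {error}")
```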


@ -31,7 +31,7 @@ def get_ssh_settings() -> UserdataSshSettings:
if "enable" not in data["ssh"]:
data["ssh"]["enable"] = True
if "passwordAuthentication" not in data["ssh"]:
data["ssh"]["passwordAuthentication"] = False
data["ssh"]["passwordAuthentication"] = True
if "rootKeys" not in data["ssh"]:
data["ssh"]["rootKeys"] = []
return UserdataSshSettings(**data["ssh"])


@ -2,10 +2,8 @@
import os
import subprocess
import pytz
from typing import Optional, List
from typing import Optional
from pydantic import BaseModel
from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.jobs.upgrade_system import rebuild_system_task
from selfprivacy_api.utils import WriteUserData, ReadUserData
@ -15,7 +13,7 @@ def get_timezone() -> str:
with ReadUserData() as user_data:
if "timezone" in user_data:
return user_data["timezone"]
return "Etc/UTC"
return "Europe/Uzhgorod"
class InvalidTimezone(Exception):
@ -60,68 +58,36 @@ def set_auto_upgrade_settings(
user_data["autoUpgrade"]["allowReboot"] = allowReboot
class ShellException(Exception):
"""Something went wrong when calling another process"""
pass
def run_blocking(cmd: List[str], new_session: bool = False) -> str:
"""Run a process, block until done, return output, complain if failed"""
process_handle = subprocess.Popen(
cmd,
shell=False,
start_new_session=new_session,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_raw, stderr_raw = process_handle.communicate()
stdout = stdout_raw.decode("utf-8")
if stderr_raw is not None:
stderr = stderr_raw.decode("utf-8")
else:
stderr = ""
output = stdout + "\n" + stderr
if process_handle.returncode != 0:
raise ShellException(
f"Shell command failed, command array: {cmd}, output: {output}"
)
return stdout
def rebuild_system() -> Job:
def rebuild_system() -> int:
"""Rebuild the system"""
job = Jobs.add(
type_id="system.nixos.rebuild",
name="Rebuild system",
description="Applying the new system configuration by building the new NixOS generation.",
status=JobStatus.CREATED,
rebuild_result = subprocess.Popen(
["systemctl", "start", "sp-nixos-rebuild.service"], start_new_session=True
)
rebuild_system_task(job)
return job
rebuild_result.communicate()[0]
return rebuild_result.returncode
def rollback_system() -> int:
"""Rollback the system"""
run_blocking(["systemctl", "start", "sp-nixos-rollback.service"], new_session=True)
return 0
def upgrade_system() -> Job:
"""Upgrade the system"""
job = Jobs.add(
type_id="system.nixos.upgrade",
name="Upgrade system",
description="Upgrading the system to the latest version.",
status=JobStatus.CREATED,
rollback_result = subprocess.Popen(
["systemctl", "start", "sp-nixos-rollback.service"], start_new_session=True
)
rebuild_system_task(job, upgrade=True)
return job
rollback_result.communicate()[0]
return rollback_result.returncode
def upgrade_system() -> int:
"""Upgrade the system"""
upgrade_result = subprocess.Popen(
["systemctl", "start", "sp-nixos-upgrade.service"], start_new_session=True
)
upgrade_result.communicate()[0]
return upgrade_result.returncode
def reboot_system() -> None:
"""Reboot the system"""
run_blocking(["reboot"], new_session=True)
subprocess.Popen(["reboot"], start_new_session=True)
def get_system_version() -> str:


@ -58,7 +58,7 @@ def get_users(
)
for user in user_data["users"]
]
if not exclude_primary and "username" in user_data.keys():
if not exclude_primary:
users.append(
UserDataUser(
username=user_data["username"],
@ -107,12 +107,6 @@ class PasswordIsEmpty(Exception):
pass
class InvalidConfiguration(Exception):
"""The userdata is broken"""
pass
def create_user(username: str, password: str):
if password == "":
raise PasswordIsEmpty("Password is empty")
@ -130,10 +124,6 @@ def create_user(username: str, password: str):
with ReadUserData() as user_data:
ensure_ssh_and_users_fields_exist(user_data)
if "username" not in user_data.keys():
raise InvalidConfiguration(
"Broken config: Admin name is not defined. Consider recovery or add it manually"
)
if username == user_data["username"]:
raise UserAlreadyExists("User already exists")
if username in [user["username"] for user in user_data["users"]]:


@ -10,6 +10,12 @@ from selfprivacy_api.dependencies import get_api_version
from selfprivacy_api.graphql.schema import schema
from selfprivacy_api.migrations import run_migrations
from selfprivacy_api.rest import (
system,
users,
api_auth,
services,
)
app = FastAPI()
@ -26,6 +32,10 @@ app.add_middleware(
)
app.include_router(system.router)
app.include_router(users.router)
app.include_router(api_auth.router)
app.include_router(services.router)
app.include_router(graphql_app, prefix="/graphql")


@ -7,6 +7,8 @@ import os
from os import statvfs
from typing import Callable, List, Optional
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.services import (
get_service_by_id,
get_all_services,
@ -42,6 +44,12 @@ from selfprivacy_api.backup.jobs import (
add_restore_job,
)
DEFAULT_JSON_PROVIDER = {
"provider": "BACKBLAZE",
"accountId": "",
"accountKey": "",
"bucket": "",
}
BACKUP_PROVIDER_ENVS = {
"kind": "BACKUP_KIND",
@ -126,11 +134,17 @@ class Backups:
Storage.store_provider(provider)
@staticmethod
def reset() -> None:
def reset(reset_json=True) -> None:
"""
Deletes all the data about the backup storage provider.
"""
Storage.reset()
if reset_json:
try:
Backups._reset_provider_json()
except FileNotFoundError:
# if there is no userdata file, we do not need to reset it
pass
@staticmethod
def _lookup_provider() -> AbstractBackupProvider:
@ -138,6 +152,15 @@ class Backups:
if redis_provider is not None:
return redis_provider
try:
json_provider = Backups._load_provider_json()
except FileNotFoundError:
json_provider = None
if json_provider is not None:
Storage.store_provider(json_provider)
return json_provider
none_provider = Backups._construct_provider(
BackupProviderEnum.NONE, login="", key="", location=""
)
@ -192,6 +215,44 @@ class Backups:
provider_model.repo_id,
)
@staticmethod
def _load_provider_json() -> Optional[AbstractBackupProvider]:
with ReadUserData() as user_data:
provider_dict = {
"provider": "",
"accountId": "",
"accountKey": "",
"bucket": "",
}
if "backup" not in user_data.keys():
if "backblaze" in user_data.keys():
provider_dict.update(user_data["backblaze"])
provider_dict["provider"] = "BACKBLAZE"
return None
else:
provider_dict.update(user_data["backup"])
if provider_dict == DEFAULT_JSON_PROVIDER:
return None
try:
return Backups._construct_provider(
kind=BackupProviderEnum[provider_dict["provider"]],
login=provider_dict["accountId"],
key=provider_dict["accountKey"],
location=provider_dict["bucket"],
)
except KeyError:
return None
@staticmethod
def _reset_provider_json() -> None:
with WriteUserData() as user_data:
if "backblaze" in user_data.keys():
del user_data["backblaze"]
user_data["backup"] = DEFAULT_JSON_PROVIDER
# Init
@staticmethod
@ -254,27 +315,18 @@ class Backups:
reason=reason,
)
Backups._on_new_snapshot_created(service_name, snapshot)
Backups._store_last_snapshot(service_name, snapshot)
if reason == BackupReason.AUTO:
Backups._prune_auto_snaps(service)
service.post_restore()
except Exception as error:
Jobs.update(job, status=JobStatus.ERROR, error=str(error))
Jobs.update(job, status=JobStatus.ERROR, status_text=str(error))
raise error
Jobs.update(job, status=JobStatus.FINISHED)
if reason in [BackupReason.AUTO, BackupReason.PRE_RESTORE]:
Jobs.set_expiration(job, AUTOBACKUP_JOB_EXPIRATION_SECONDS)
return Backups.sync_date_from_cache(snapshot)
@staticmethod
def sync_date_from_cache(snapshot: Snapshot) -> Snapshot:
"""
Our snapshot creation dates are different from those on server by a tiny amount.
This is a convenience, maybe it is better to write a special comparison
function for snapshots
"""
return Storage.get_cached_snapshot_by_id(snapshot.id)
return snapshot
@staticmethod
def _auto_snaps(service):
@ -343,8 +395,11 @@ class Backups:
auto_snaps = Backups._auto_snaps(service)
new_snaplist = Backups._prune_snaps_with_quotas(auto_snaps)
deletable_snaps = [snap for snap in auto_snaps if snap not in new_snaplist]
Backups.forget_snapshots(deletable_snaps)
# TODO: Can be optimized since there is forgetting of an array in one restic op
# but most of the time this will be only one snap to forget.
for snap in auto_snaps:
if snap not in new_snaplist:
Backups.forget_snapshot(snap)
@staticmethod
def _standardize_quotas(i: int) -> int:
@ -371,10 +426,7 @@ class Backups:
yearly=Backups._standardize_quotas(quotas.yearly), # type: ignore
)
)
# do not prune all autosnaps right away, this will be done by an async task
@staticmethod
def prune_all_autosnaps() -> None:
for service in get_all_services():
Backups._prune_auto_snaps(service)
@ -532,12 +584,13 @@ class Backups:
@staticmethod
def get_all_snapshots() -> List[Snapshot]:
"""Returns all snapshots"""
# When we refresh our cache:
# 1. Manually
# 2. On timer
# 3. On new snapshot
# 4. On snapshot deletion
cached_snapshots = Storage.get_cached_snapshots()
if cached_snapshots:
return cached_snapshots
# TODO: the oldest snapshots will get expired faster than the new ones.
# How to detect that the end is missing?
Backups.force_snapshot_cache_reload()
return Storage.get_cached_snapshots()
@staticmethod
@ -553,29 +606,19 @@ class Backups:
return snap
@staticmethod
def forget_snapshots(snapshots: List[Snapshot]) -> None:
"""
Deletes a batch of snapshots from the repo and syncs cache
Optimized
"""
ids = [snapshot.id for snapshot in snapshots]
Backups.provider().backupper.forget_snapshots(ids)
Backups.force_snapshot_cache_reload()
@staticmethod
def forget_snapshot(snapshot: Snapshot) -> None:
"""Deletes a snapshot from the repo and from cache"""
Backups.forget_snapshots([snapshot])
Backups.provider().backupper.forget_snapshot(snapshot.id)
Storage.delete_cached_snapshot(snapshot)
@staticmethod
def forget_all_snapshots():
"""
Mark all snapshots we have made for deletion and make them inaccessible
(this is done by cloud, we only issue a command)
"""
Backups.forget_snapshots(Backups.get_all_snapshots())
"""deliberately erase all snapshots we made"""
# there is no dedicated optimized command for this,
# but maybe we can have a multi-erase
for snapshot in Backups.get_all_snapshots():
Backups.forget_snapshot(snapshot)
@staticmethod
def force_snapshot_cache_reload() -> None:
@ -598,11 +641,12 @@ class Backups:
)
@staticmethod
def _on_new_snapshot_created(service_id: str, snapshot: Snapshot) -> None:
def _store_last_snapshot(service_id: str, snapshot: Snapshot) -> None:
"""What do we do with a snapshot that is just made?"""
# non-expiring timestamp of the last
Storage.store_last_timestamp(service_id, snapshot)
Backups.force_snapshot_cache_reload()
# expiring cache entry
Storage.cache_snapshot(snapshot)
# Autobackup
@ -674,13 +718,10 @@ class Backups:
def is_time_to_backup_service(service: Service, time: datetime):
"""Returns True if it is time to back up a service"""
period = Backups.autobackup_period_minutes()
if period is None:
return False
if not service.is_enabled():
return False
if not service.can_be_backed_up():
return False
if period is None:
return False
last_error = Backups.get_last_backup_error_time(service)
@ -689,9 +730,8 @@ class Backups:
return False
last_backup = Backups.get_last_backed_up(service)
# Queue a backup immediately if there are no previous backups
if last_backup is None:
# queue a backup immediately if there are no previous backups
return True
if time > last_backup + timedelta(minutes=period):


@ -66,8 +66,3 @@ class AbstractBackupper(ABC):
def forget_snapshot(self, snapshot_id) -> None:
"""Forget a snapshot"""
raise NotImplementedError
@abstractmethod
def forget_snapshots(self, snapshot_ids: List[str]) -> None:
"""Maybe optimized deletion of a batch of snapshots, just cycling if unsupported"""
raise NotImplementedError


@ -39,7 +39,4 @@ class NoneBackupper(AbstractBackupper):
raise NotImplementedError
def forget_snapshot(self, snapshot_id):
raise NotImplementedError("forget_snapshot")
def forget_snapshots(self, snapshots):
raise NotImplementedError("forget_snapshots")
raise NotImplementedError


@ -18,7 +18,7 @@ from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.jobs import get_backup_job
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.jobs import Jobs, JobStatus, Job
from selfprivacy_api.jobs import Jobs, JobStatus
from selfprivacy_api.backup.local_secret import LocalBackupSecret
@ -86,10 +86,6 @@ class ResticBackupper(AbstractBackupper):
return f"echo {LocalBackupSecret.get()}"
def restic_command(self, *args, tags: Optional[List[str]] = None) -> List[str]:
"""
Construct a restic command against the currently configured repo
Can support [nested] arrays as arguments, will flatten them into the final command
"""
if tags is None:
tags = []
@ -146,55 +142,6 @@ class ResticBackupper(AbstractBackupper):
result.append(item)
return result
@staticmethod
def _run_backup_command(
backup_command: List[str], job: Optional[Job]
) -> List[dict]:
"""And handle backup output"""
messages = []
output = []
restic_reported_error = False
for raw_message in output_yielder(backup_command):
if "ERROR:" in raw_message:
restic_reported_error = True
output.append(raw_message)
if not restic_reported_error:
message = ResticBackupper.parse_message(raw_message, job)
messages.append(message)
if restic_reported_error:
raise ValueError(
"Restic returned error(s): ",
output,
)
return messages
@staticmethod
def _replace_in_array(array: List[str], target, replacement) -> None:
if target == "":
return
for i, value in enumerate(array):
if target in value:
array[i] = array[i].replace(target, replacement)
def _censor_command(self, command: List[str]) -> List[str]:
result = command.copy()
ResticBackupper._replace_in_array(result, self.key, "CENSORED")
ResticBackupper._replace_in_array(result, LocalBackupSecret.get(), "CENSORED")
return result
@staticmethod
def _get_backup_job(service_name: str) -> Optional[Job]:
service = get_service_by_id(service_name)
if service is None:
raise ValueError("No service with id ", service_name)
return get_backup_job(service)
@unlocked_repo
def start_backup(
self,
@ -205,11 +152,13 @@ class ResticBackupper(AbstractBackupper):
"""
Start backup with restic
"""
assert len(folders) != 0
job = ResticBackupper._get_backup_job(service_name)
# but maybe it is ok to accept a union
# of a string and an array of strings
assert not isinstance(folders, str)
tags = [service_name, reason.value]
backup_command = self.restic_command(
"backup",
"--json",
@ -217,9 +166,18 @@ class ResticBackupper(AbstractBackupper):
tags=tags,
)
try:
messages = ResticBackupper._run_backup_command(backup_command, job)
service = get_service_by_id(service_name)
if service is None:
raise ValueError("No service with id ", service_name)
job = get_backup_job(service)
messages = []
output = []
try:
for raw_message in output_yielder(backup_command):
output.append(raw_message)
message = self.parse_message(raw_message, job)
messages.append(message)
id = ResticBackupper._snapshot_id_from_backup_messages(messages)
return Snapshot(
created_at=datetime.datetime.now(datetime.timezone.utc),
@ -232,8 +190,9 @@ class ResticBackupper(AbstractBackupper):
raise ValueError(
"Could not create a snapshot: ",
str(error),
"command: ",
self._censor_command(backup_command),
output,
"parsed messages:",
messages,
) from error
@staticmethod
@ -246,8 +205,7 @@ class ResticBackupper(AbstractBackupper):
raise ValueError("no summary message in restic json output")
@staticmethod
def parse_message(raw_message_line: str, job: Optional[Job] = None) -> dict:
def parse_message(self, raw_message_line: str, job=None) -> dict:
message = ResticBackupper.parse_json_output(raw_message_line)
if not isinstance(message, dict):
raise ValueError("we have too many messages on one line?")
@ -408,6 +366,7 @@ class ResticBackupper(AbstractBackupper):
stderr=subprocess.STDOUT,
shell=False,
) as handle:
# for some reason restore does not support
# nice reporting of progress via json
output = handle.communicate()[0].decode("utf-8")
@ -425,15 +384,15 @@ class ResticBackupper(AbstractBackupper):
output,
)
def forget_snapshot(self, snapshot_id: str) -> None:
self.forget_snapshots([snapshot_id])
@unlocked_repo
def forget_snapshots(self, snapshot_ids: List[str]) -> None:
# in case the backupper program supports batching, otherwise implement it by cycling
def forget_snapshot(self, snapshot_id) -> None:
"""
Either removes snapshot or marks it for deletion later,
depending on server settings
"""
forget_command = self.restic_command(
"forget",
[snapshot_ids],
snapshot_id,
# TODO: prune should be done in a separate process
"--prune",
)
@ -455,7 +414,7 @@ class ResticBackupper(AbstractBackupper):
if "no matching ID found" in err:
raise ValueError(
"trying to delete, but no such snapshot(s): ", snapshot_ids
"trying to delete, but no such snapshot: ", snapshot_id
)
assert (


@ -14,10 +14,6 @@ def backup_job_type(service: Service) -> str:
return f"{job_type_prefix(service)}.backup"
def autobackup_job_type() -> str:
return "backups.autobackup"
def restore_job_type(service: Service) -> str:
return f"{job_type_prefix(service)}.restore"
@ -40,17 +36,6 @@ def is_something_running_for(service: Service) -> bool:
return len(running_jobs) != 0
def add_autobackup_job(services: List[Service]) -> Job:
service_names = [s.get_display_name() for s in services]
pretty_service_list: str = ", ".join(service_names)
job = Jobs.add(
type_id=autobackup_job_type(),
name="Automatic backup",
description=f"Scheduled backup for services: {pretty_service_list}",
)
return job
def add_backup_job(service: Service) -> Job:
if is_something_running_for(service):
message = (
@ -93,14 +78,12 @@ def get_job_by_type(type_id: str) -> Optional[Job]:
JobStatus.RUNNING,
]:
return job
return None
def get_failed_job_by_type(type_id: str) -> Optional[Job]:
for job in Jobs.get_jobs():
if job.type_id == type_id and job.status == JobStatus.ERROR:
return job
return None
def get_backup_job(service: Service) -> Optional[Job]:


@ -21,8 +21,6 @@ PROVIDER_MAPPING: dict[BackupProviderEnum, Type[AbstractBackupProvider]] = {
def get_provider(
provider_type: BackupProviderEnum,
) -> Type[AbstractBackupProvider]:
if provider_type not in PROVIDER_MAPPING.keys():
raise LookupError("could not look up provider", provider_type)
return PROVIDER_MAPPING[provider_type]


@ -138,17 +138,18 @@ class Storage:
@staticmethod
def store_provider(provider: AbstractBackupProvider) -> None:
"""Stores backup provider auth data in redis"""
model = BackupProviderModel(
kind=get_kind(provider),
login=provider.login,
key=provider.key,
location=provider.location,
repo_id=provider.repo_id,
"""Stores backup stroage provider auth data in redis"""
store_model_as_hash(
redis,
REDIS_PROVIDER_KEY,
BackupProviderModel(
kind=get_kind(provider),
login=provider.login,
key=provider.key,
location=provider.location,
repo_id=provider.repo_id,
),
)
store_model_as_hash(redis, REDIS_PROVIDER_KEY, model)
if Storage.load_provider() != model:
raise IOError("could not store the provider model: ", model.dict)
@staticmethod
def load_provider() -> Optional[BackupProviderModel]:


@ -3,20 +3,13 @@ The tasks module contains the worker tasks that are used to back up and restore
"""
from datetime import datetime, timezone
from selfprivacy_api.graphql.common_types.backup import (
RestoreStrategy,
BackupReason,
)
from selfprivacy_api.graphql.common_types.backup import RestoreStrategy, BackupReason
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.utils.huey import huey
from huey import crontab
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.services.service import Service
from selfprivacy_api.backup import Backups
from selfprivacy_api.backup.jobs import add_autobackup_job
from selfprivacy_api.jobs import Jobs, JobStatus, Job
SNAPSHOT_CACHE_TTL_HOURS = 6
@ -33,33 +26,16 @@ def validate_datetime(dt: datetime) -> bool:
# huey tasks need to return something
@huey.task()
def start_backup(service_id: str, reason: BackupReason = BackupReason.EXPLICIT) -> bool:
def start_backup(
service: Service, reason: BackupReason = BackupReason.EXPLICIT
) -> bool:
"""
The worker task that starts the backup process.
"""
service = get_service_by_id(service_id)
if service is None:
raise ValueError(f"No such service: {service_id}")
Backups.back_up(service, reason)
return True
@huey.task()
def prune_autobackup_snapshots(job: Job) -> bool:
"""
Remove all autobackup snapshots that do not fit into quotas set
"""
Jobs.update(job, JobStatus.RUNNING)
try:
Backups.prune_all_autosnaps()
except Exception as e:
Jobs.update(job, JobStatus.ERROR, error=type(e).__name__ + ":" + str(e))
return False
Jobs.update(job, JobStatus.FINISHED)
return True
@huey.task()
def restore_snapshot(
snapshot: Snapshot,
@ -72,46 +48,16 @@ def restore_snapshot(
return True
def do_autobackup() -> None:
"""
Body of autobackup task, broken out to test it
For some reason, we cannot launch periodic huey tasks
inside tests
"""
time = datetime.utcnow().replace(tzinfo=timezone.utc)
services_to_back_up = Backups.services_to_back_up(time)
if not services_to_back_up:
return
job = add_autobackup_job(services_to_back_up)
progress_per_service = 100 // len(services_to_back_up)
progress = 0
Jobs.update(job, JobStatus.RUNNING, progress=progress)
for service in services_to_back_up:
try:
Backups.back_up(service, BackupReason.AUTO)
except Exception as error:
Jobs.update(
job,
status=JobStatus.ERROR,
error=type(error).__name__ + ": " + str(error),
)
return
progress = progress + progress_per_service
Jobs.update(job, JobStatus.RUNNING, progress=progress)
Jobs.update(job, JobStatus.FINISHED)
@huey.periodic_task(validate_datetime=validate_datetime)
def automatic_backup() -> None:
def automatic_backup():
"""
The worker periodic task that starts the automatic backup process.
"""
do_autobackup()
time = datetime.utcnow().replace(tzinfo=timezone.utc)
for service in Backups.services_to_back_up(time):
start_backup(service, BackupReason.AUTO)
@huey.periodic_task(crontab(hour="*/" + str(SNAPSHOT_CACHE_TTL_HOURS)))
@huey.periodic_task(crontab(hour=SNAPSHOT_CACHE_TTL_HOURS))
def reload_snapshot_cache():
Backups.force_snapshot_cache_reload()


@ -27,4 +27,4 @@ async def get_token_header(
def get_api_version() -> str:
"""Get API version"""
return "3.2.1"
return "2.4.2"


@ -2,7 +2,6 @@ import typing
import strawberry
# TODO: use https://strawberry.rocks/docs/integrations/pydantic when it is stable
@strawberry.type
class DnsRecord:
"""DNS record"""
@ -12,4 +11,3 @@ class DnsRecord:
content: str
ttl: int
priority: typing.Optional[int]
display_name: str


@ -1,17 +1,13 @@
from enum import Enum
from typing import Optional, List
import datetime
import typing
import strawberry
import datetime
from selfprivacy_api.graphql.common_types.backup import BackupReason
from selfprivacy_api.graphql.common_types.dns import DnsRecord
from selfprivacy_api.services import get_service_by_id, get_services_by_location
from selfprivacy_api.services import Service as ServiceInterface
from selfprivacy_api.services import ServiceDnsRecord
from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.utils.network import get_ip4, get_ip6
def get_usages(root: "StorageVolume") -> list["StorageUsageInterface"]:
@ -36,8 +32,8 @@ class StorageVolume:
used_space: str
root: bool
name: str
model: Optional[str]
serial: Optional[str]
model: typing.Optional[str]
serial: typing.Optional[str]
type: str
@strawberry.field
@ -49,7 +45,7 @@ class StorageVolume:
@strawberry.interface
class StorageUsageInterface:
used_space: str
volume: Optional[StorageVolume]
volume: typing.Optional[StorageVolume]
title: str
@ -57,7 +53,7 @@ class StorageUsageInterface:
class ServiceStorageUsage(StorageUsageInterface):
"""Storage usage for a service"""
service: Optional["Service"]
service: typing.Optional["Service"]
@strawberry.enum
@ -89,20 +85,6 @@ def get_storage_usage(root: "Service") -> ServiceStorageUsage:
)
# TODO: This won't be needed when deriving DnsRecord via strawberry pydantic integration
# https://strawberry.rocks/docs/integrations/pydantic
# Remove when the link above says it got stable.
def service_dns_to_graphql(record: ServiceDnsRecord) -> DnsRecord:
return DnsRecord(
record_type=record.type,
name=record.name,
content=record.content,
ttl=record.ttl,
priority=record.priority,
display_name=record.display_name,
)
@strawberry.type
class Service:
id: str
@ -115,26 +97,16 @@ class Service:
can_be_backed_up: bool
backup_description: str
status: ServiceStatusEnum
url: Optional[str]
@strawberry.field
def dns_records(self) -> Optional[List[DnsRecord]]:
service = get_service_by_id(self.id)
if service is None:
raise LookupError(f"no service {self.id}. Should be unreachable")
raw_records = service.get_dns_records(get_ip4(), get_ip6())
dns_records = [service_dns_to_graphql(record) for record in raw_records]
return dns_records
url: typing.Optional[str]
dns_records: typing.Optional[typing.List[DnsRecord]]
@strawberry.field
def storage_usage(self) -> ServiceStorageUsage:
"""Get storage usage for a service"""
return get_storage_usage(self)
# TODO: fill this
@strawberry.field
def backup_snapshots(self) -> Optional[List["SnapshotInfo"]]:
def backup_snapshots(self) -> typing.Optional[typing.List["SnapshotInfo"]]:
return None
@ -160,10 +132,20 @@ def service_to_graphql_service(service: ServiceInterface) -> Service:
backup_description=service.get_backup_description(),
status=ServiceStatusEnum(service.get_status().value),
url=service.get_url(),
dns_records=[
DnsRecord(
record_type=record.type,
name=record.name,
content=record.content,
ttl=record.ttl,
priority=record.priority,
)
for record in service.get_dns_records()
],
)
def get_volume_by_id(volume_id: str) -> Optional[StorageVolume]:
def get_volume_by_id(volume_id: str) -> typing.Optional[StorageVolume]:
"""Get volume by id"""
volume = BlockDevices().get_block_device(volume_id)
if volume is None:


@ -17,6 +17,7 @@ class UserType(Enum):
@strawberry.type
class User:
user_type: UserType
username: str
# userHomeFolderspace: UserHomeFolderUsage
@ -31,6 +32,7 @@ class UserMutationReturn(MutationReturnInterface):
def get_user_by_username(username: str) -> typing.Optional[User]:
user = users_actions.get_user_by_username(username)
if user is None:
return None


@ -1,8 +1,6 @@
import typing
import strawberry
from selfprivacy_api.jobs import Jobs
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericMutationReturn,
@ -20,11 +18,7 @@ from selfprivacy_api.graphql.common_types.backup import (
from selfprivacy_api.backup import Backups
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.backup.tasks import (
start_backup,
restore_snapshot,
prune_autobackup_snapshots,
)
from selfprivacy_api.backup.tasks import start_backup, restore_snapshot
from selfprivacy_api.backup.jobs import add_backup_job, add_restore_job
@ -109,16 +103,8 @@ class BackupMutations:
To disable autobackup use autobackup period setting, not this mutation.
"""
job = Jobs.add(
name="Trimming autobackup snapshots",
type_id="backups.autobackup_trimming",
description="Pruning the excessive snapshots after the new autobackup quotas are set",
)
try:
Backups.set_autobackup_quotas(quotas)
# this task is async and can fail with only a job to report the error
prune_autobackup_snapshots(job)
return GenericBackupConfigReturn(
success=True,
message="",
@ -129,7 +115,7 @@ class BackupMutations:
except Exception as e:
return GenericBackupConfigReturn(
success=False,
message=type(e).__name__ + ":" + str(e),
message=str(e),
code=400,
configuration=Backup().configuration(),
)
@ -148,7 +134,7 @@ class BackupMutations:
)
job = add_backup_job(service)
start_backup(service_id)
start_backup(service)
return GenericJobMutationReturn(
success=True,


@ -20,7 +20,6 @@ from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericMutationReturn,
)
from selfprivacy_api.graphql.mutations.services_mutations import (
ServiceJobMutationReturn,
ServiceMutationReturn,
ServicesMutations,
)
@ -202,7 +201,7 @@ class DeprecatedServicesMutations:
"services",
)
move_service: ServiceJobMutationReturn = deprecated_mutation(
move_service: ServiceMutationReturn = deprecated_mutation(
ServicesMutations.move_service,
"services",
)


@ -4,26 +4,18 @@ import typing
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.jobs import JobStatus
from traceback import format_tb as format_traceback
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericJobMutationReturn,
GenericMutationReturn,
)
from selfprivacy_api.graphql.common_types.service import (
Service,
service_to_graphql_service,
)
from selfprivacy_api.actions.services import (
move_service,
ServiceNotFoundError,
VolumeNotFoundError,
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericJobMutationReturn,
GenericMutationReturn,
)
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.utils.block_devices import BlockDevices
@strawberry.type
@ -55,22 +47,14 @@ class ServicesMutations:
@strawberry.mutation(permission_classes=[IsAuthenticated])
def enable_service(self, service_id: str) -> ServiceMutationReturn:
"""Enable service."""
try:
service = get_service_by_id(service_id)
if service is None:
return ServiceMutationReturn(
success=False,
message="Service not found.",
code=404,
)
service.enable()
except Exception as e:
service = get_service_by_id(service_id)
if service is None:
return ServiceMutationReturn(
success=False,
message=pretty_error(e),
code=400,
message="Service not found.",
code=404,
)
service.enable()
return ServiceMutationReturn(
success=True,
message="Service enabled.",
@ -81,21 +65,14 @@ class ServicesMutations:
@strawberry.mutation(permission_classes=[IsAuthenticated])
def disable_service(self, service_id: str) -> ServiceMutationReturn:
"""Disable service."""
try:
service = get_service_by_id(service_id)
if service is None:
return ServiceMutationReturn(
success=False,
message="Service not found.",
code=404,
)
service.disable()
except Exception as e:
service = get_service_by_id(service_id)
if service is None:
return ServiceMutationReturn(
success=False,
message=pretty_error(e),
code=400,
message="Service not found.",
code=404,
)
service.disable()
return ServiceMutationReturn(
success=True,
message="Service disabled.",
@ -160,58 +137,33 @@ class ServicesMutations:
@strawberry.mutation(permission_classes=[IsAuthenticated])
def move_service(self, input: MoveServiceInput) -> ServiceJobMutationReturn:
"""Move service."""
# We need a service instance for a reply later
service = get_service_by_id(input.service_id)
if service is None:
return ServiceJobMutationReturn(
success=False,
message=f"Service does not exist: {input.service_id}",
message="Service not found.",
code=404,
)
try:
job = move_service(input.service_id, input.location)
except (ServiceNotFoundError, VolumeNotFoundError) as e:
if not service.is_movable():
return ServiceJobMutationReturn(
success=False,
message=pretty_error(e),
code=404,
)
except Exception as e:
return ServiceJobMutationReturn(
success=False,
message=pretty_error(e),
message="Service is not movable.",
code=400,
service=service_to_graphql_service(service),
)
if job.status in [JobStatus.CREATED, JobStatus.RUNNING]:
return ServiceJobMutationReturn(
success=True,
message="Started moving the service.",
code=200,
service=service_to_graphql_service(service),
job=job_to_api_job(job),
)
elif job.status == JobStatus.FINISHED:
return ServiceJobMutationReturn(
success=True,
message="Service moved.",
code=200,
service=service_to_graphql_service(service),
job=job_to_api_job(job),
)
else:
volume = BlockDevices().get_block_device(input.location)
if volume is None:
return ServiceJobMutationReturn(
success=False,
message=f"While moving service and performing the step '{job.status_text}', error occured: {job.error}",
code=400,
message="Volume not found.",
code=404,
service=service_to_graphql_service(service),
job=job_to_api_job(job),
)
def pretty_error(e: Exception) -> str:
traceback = "/r".join(format_traceback(e.__traceback__))
return type(e).__name__ + ": " + str(e) + ": " + traceback
job = service.move_to_volume(volume)
return ServiceJobMutationReturn(
success=True,
message="Service moved.",
code=200,
service=service_to_graphql_service(service),
job=job_to_api_job(job),
)


@ -3,18 +3,12 @@
import typing
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericJobMutationReturn,
GenericMutationReturn,
MutationReturnInterface,
GenericJobMutationReturn,
)
import selfprivacy_api.actions.system as system_actions
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.jobs.nix_collect_garbage import start_nix_collect_garbage
import selfprivacy_api.actions.ssh as ssh_actions
@strawberry.type
@ -32,22 +26,6 @@ class AutoUpgradeSettingsMutationReturn(MutationReturnInterface):
allowReboot: bool
@strawberry.type
class SSHSettingsMutationReturn(MutationReturnInterface):
"""A return type for after changing SSH settings"""
enable: bool
password_authentication: bool
@strawberry.input
class SSHSettingsInput:
"""Input type for SSH settings"""
enable: bool
password_authentication: bool
@strawberry.input
class AutoUpgradeSettingsInput:
"""Input type for auto upgrade settings"""
@ -99,90 +77,40 @@ class SystemMutations:
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def change_ssh_settings(
self, settings: SSHSettingsInput
) -> SSHSettingsMutationReturn:
"""Change ssh settings of the server."""
ssh_actions.set_ssh_settings(
enable=settings.enable,
password_authentication=settings.password_authentication,
)
new_settings = ssh_actions.get_ssh_settings()
return SSHSettingsMutationReturn(
def run_system_rebuild(self) -> GenericMutationReturn:
system_actions.rebuild_system()
return GenericMutationReturn(
success=True,
message="SSH settings changed",
message="Starting rebuild system",
code=200,
enable=new_settings.enable,
password_authentication=new_settings.passwordAuthentication,
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def run_system_rebuild(self) -> GenericJobMutationReturn:
try:
job = system_actions.rebuild_system()
return GenericJobMutationReturn(
success=True,
message="Starting system rebuild",
code=200,
job=job_to_api_job(job),
)
except system_actions.ShellException as e:
return GenericJobMutationReturn(
success=False,
message=str(e),
code=500,
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def run_system_rollback(self) -> GenericMutationReturn:
system_actions.rollback_system()
try:
return GenericMutationReturn(
success=True,
message="Starting system rollback",
code=200,
)
except system_actions.ShellException as e:
return GenericMutationReturn(
success=False,
message=str(e),
code=500,
)
return GenericMutationReturn(
success=True,
message="Starting rebuild system",
code=200,
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def run_system_upgrade(self) -> GenericJobMutationReturn:
try:
job = system_actions.upgrade_system()
return GenericJobMutationReturn(
success=True,
message="Starting system upgrade",
code=200,
job=job_to_api_job(job),
)
except system_actions.ShellException as e:
return GenericJobMutationReturn(
success=False,
message=str(e),
code=500,
)
def run_system_upgrade(self) -> GenericMutationReturn:
system_actions.upgrade_system()
return GenericMutationReturn(
success=True,
message="Starting rebuild system",
code=200,
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def reboot_system(self) -> GenericMutationReturn:
system_actions.reboot_system()
try:
return GenericMutationReturn(
success=True,
message="System reboot has started",
code=200,
)
except system_actions.ShellException as e:
return GenericMutationReturn(
success=False,
message=str(e),
code=500,
)
return GenericMutationReturn(
success=True,
message="System reboot has started",
code=200,
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def pull_repository_changes(self) -> GenericMutationReturn:
@ -198,14 +126,3 @@ class SystemMutations:
message=f"Failed to pull repository changes:\n{result.data}",
code=500,
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def nix_collect_garbage(self) -> GenericJobMutationReturn:
job = start_nix_collect_garbage()
return GenericJobMutationReturn(
success=True,
code=200,
message="Garbage collector started...",
job=job_to_api_job(job),
)


@ -69,12 +69,6 @@ class UsersMutations:
message=str(e),
code=400,
)
except users_actions.InvalidConfiguration as e:
return UserMutationReturn(
success=False,
message=str(e),
code=400,
)
except users_actions.UserAlreadyExists as e:
return UserMutationReturn(
success=False,


@ -34,24 +34,6 @@ class BackupConfiguration:
location_id: typing.Optional[str]
# TODO: Ideally this should not be done in API but making an internal Service requires more work
# than to make an API record about a service
def tombstone_service(service_id: str) -> Service:
return Service(
id=service_id,
display_name=f"{service_id} (Orphaned)",
description="",
svg_icon="",
is_movable=False,
is_required=False,
is_enabled=False,
status=ServiceStatusEnum.OFF,
url=None,
can_be_backed_up=False,
backup_description="",
)
@strawberry.type
class Backup:
@strawberry.field
@ -73,21 +55,27 @@ class Backup:
result = []
snapshots = Backups.get_all_snapshots()
for snap in snapshots:
api_service = None
service = get_service_by_id(snap.service_name)
if service is None:
api_service = tombstone_service(snap.service_name)
else:
api_service = service_to_graphql_service(service)
if api_service is None:
raise NotImplementedError(
f"Could not construct API Service record for:{snap.service_name}. This should be unreachable and is a bug if you see it."
service = Service(
id=snap.service_name,
display_name=f"{snap.service_name} (Orphaned)",
description="",
svg_icon="",
is_movable=False,
is_required=False,
is_enabled=False,
status=ServiceStatusEnum.OFF,
url=None,
dns_records=None,
can_be_backed_up=False,
backup_description="",
)
else:
service = service_to_graphql_service(service)
graphql_snap = SnapshotInfo(
id=snap.id,
service=api_service,
service=service,
created_at=snap.created_at,
reason=snap.reason,
)


@ -15,6 +15,7 @@ from selfprivacy_api.jobs import Jobs
class Job:
@strawberry.field
def get_jobs(self) -> typing.List[ApiJob]:
Jobs.get_jobs()
return [job_to_api_job(job) for job in Jobs.get_jobs()]


@ -14,7 +14,6 @@ class DnsProvider(Enum):
class ServerProvider(Enum):
HETZNER = "HETZNER"
DIGITALOCEAN = "DIGITALOCEAN"
OTHER = "OTHER"
@strawberry.enum


@ -33,7 +33,6 @@ class SystemDomainInfo:
content=record.content,
ttl=record.ttl,
priority=record.priority,
display_name=record.display_name,
)
for record in get_all_required_dns_records()
]


@ -268,20 +268,6 @@ class Jobs:
return False
def report_progress(progress: int, job: Job, status_text: str) -> None:
"""
A terse way to call a common operation, for readability
job.report_progress() would be even better
but it would go against how this file is written
"""
Jobs.update(
job=job,
status=JobStatus.RUNNING,
status_text=status_text,
progress=progress,
)
def _redis_key_from_uuid(uuid_string) -> str:
return "jobs:" + str(uuid_string)


@ -67,8 +67,8 @@ def move_folder(
try:
data_path.mkdir(mode=0o750, parents=True, exist_ok=True)
except Exception as error:
print(f"Error creating data path: {error}")
except Exception as e:
print(f"Error creating data path: {e}")
return
try:

View File

@ -1,147 +0,0 @@
import re
import subprocess
from typing import Tuple, Iterable
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.jobs import JobStatus, Jobs, Job
class ShellException(Exception):
"""Shell-related errors"""
COMPLETED_WITH_ERROR = "Error occurred, please report this to the support chat."
RESULT_WAS_NOT_FOUND_ERROR = (
"We are sorry, garbage collection result was not found. "
"Something went wrong, please report this to the support chat."
)
CLEAR_COMPLETED = "Garbage collection completed."
def delete_old_gens_and_return_dead_report() -> str:
subprocess.run(
["nix-env", "-p", "/nix/var/nix/profiles/system", "--delete-generations old"],
check=False,
)
result = subprocess.check_output(["nix-store", "--gc", "--print-dead"]).decode(
"utf-8"
)
return " " if result is None else result
def run_nix_collect_garbage() -> Iterable[bytes]:
process = subprocess.Popen(
["nix-store", "--gc"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
return process.stdout if process.stdout else iter([])
def parse_line(job: Job, line: str) -> Job:
"""
Parse the output for the final summary line that reports
the total amount of space freed, for example:
"1537 store paths deleted, 339.84 MiB freed".
"""
pattern = re.compile(r"[+-]?\d+\.\d+ \w+(?= freed)")
match = re.search(pattern, line)
if match is None:
raise ShellException("nix returned gibberish output")
else:
Jobs.update(
job=job,
status=JobStatus.FINISHED,
status_text=CLEAR_COMPLETED,
result=f"{match.group(0)} have been cleared",
)
return job
def process_stream(job: Job, stream: Iterable[bytes], total_dead_packages: int) -> None:
completed_packages = 0
prev_progress = 0
for line in stream:
line = line.decode("utf-8")
if "deleting '/nix/store/" in line:
completed_packages += 1
percent = int((completed_packages / total_dead_packages) * 100)
if percent - prev_progress >= 5:
Jobs.update(
job=job,
status=JobStatus.RUNNING,
progress=percent,
status_text="Cleaning...",
)
prev_progress = percent
elif "store paths deleted," in line:
parse_line(job, line)
def get_dead_packages(output) -> Tuple[int, float]:
dead = len(re.findall("/nix/store/", output))
percent = 0
if dead != 0:
percent = 100 / dead
return dead, percent
@huey.task()
def calculate_and_clear_dead_paths(job: Job):
Jobs.update(
job=job,
status=JobStatus.RUNNING,
progress=0,
status_text="Calculate the number of dead packages...",
)
dead_packages, package_equal_to_percent = get_dead_packages(
delete_old_gens_and_return_dead_report()
)
if dead_packages == 0:
Jobs.update(
job=job,
status=JobStatus.FINISHED,
status_text="Nothing to clear",
result="System is clear",
)
return True
Jobs.update(
job=job,
status=JobStatus.RUNNING,
progress=0,
status_text=f"Found {dead_packages} packages to remove!",
)
stream = run_nix_collect_garbage()
try:
process_stream(job, stream, dead_packages)
except ShellException as error:
Jobs.update(
job=job,
status=JobStatus.ERROR,
status_text=COMPLETED_WITH_ERROR,
error=RESULT_WAS_NOT_FOUND_ERROR,
)
def start_nix_collect_garbage() -> Job:
job = Jobs.add(
type_id="maintenance.collect_nix_garbage",
name="Collect garbage",
description="Cleaning up unused packages",
)
calculate_and_clear_dead_paths(job=job)
return job
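For reference, a minimal self-contained sketch of the progress arithmetic used by `get_dead_packages` and `process_stream` above, run against a fabricated nix-store transcript (the store paths are made up):

```python
# Fabricated `nix-store --gc --print-dead` style output; real paths differ.
fake_report = (
    "/nix/store/aaaa-foo-1.0\n"
    "/nix/store/bbbb-bar-2.1\n"
    "/nix/store/cccc-baz-0.3\n"
)
dead = fake_report.count("/nix/store/")   # what get_dead_packages() counts
assert dead == 3

completed, prev_progress = 0, 0
for _path in fake_report.splitlines():
    completed += 1
    percent = int((completed / dead) * 100)
    if percent - prev_progress >= 5:      # report only on 5%+ jumps, as above
        print(f"Cleaning... {percent}%")
        prev_progress = percent
```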

View File

@ -1,136 +0,0 @@
"""
A task to start the system upgrade or rebuild by starting a systemd unit.
After starting, track the status of the systemd unit and update the Job
status accordingly.
"""
import subprocess
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.jobs import JobStatus, Jobs, Job
from selfprivacy_api.utils.waitloop import wait_until_true
from selfprivacy_api.utils.systemd import (
get_service_status,
get_last_log_lines,
ServiceStatus,
)
START_TIMEOUT = 60 * 5
START_INTERVAL = 1
RUN_TIMEOUT = 60 * 60
RUN_INTERVAL = 5
def check_if_started(unit_name: str):
"""Check if the systemd unit has started"""
try:
status = get_service_status(unit_name)
if status == ServiceStatus.ACTIVE:
return True
return False
except subprocess.CalledProcessError:
return False
def check_running_status(job: Job, unit_name: str):
"""Check if the systemd unit is running"""
try:
status = get_service_status(unit_name)
if status == ServiceStatus.INACTIVE:
Jobs.update(
job=job,
status=JobStatus.FINISHED,
result="System rebuilt.",
progress=100,
)
return True
if status == ServiceStatus.FAILED:
log_lines = get_last_log_lines(unit_name, 10)
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="System rebuild failed. Last log lines:\n" + "\n".join(log_lines),
)
return True
if status == ServiceStatus.ACTIVE:
log_lines = get_last_log_lines(unit_name, 1)
Jobs.update(
job=job,
status=JobStatus.RUNNING,
status_text=log_lines[0] if len(log_lines) > 0 else "",
)
return False
return False
except subprocess.CalledProcessError:
return False
def rebuild_system(job: Job, upgrade: bool = False):
"""
Broken out to allow calling it synchronously.
We cannot simply block until the task is done, because that would require
a second worker, which we do not have.
"""
unit_name = "sp-nixos-upgrade.service" if upgrade else "sp-nixos-rebuild.service"
try:
command = ["systemctl", "start", unit_name]
subprocess.run(
command,
check=True,
start_new_session=True,
shell=False,
)
Jobs.update(
job=job,
status=JobStatus.RUNNING,
status_text="Starting the system rebuild...",
)
# Wait for the systemd unit to start
try:
wait_until_true(
lambda: check_if_started(unit_name),
timeout_sec=START_TIMEOUT,
interval=START_INTERVAL,
)
except TimeoutError:
log_lines = get_last_log_lines(unit_name, 10)
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="System rebuild timed out. Last log lines:\n"
+ "\n".join(log_lines),
)
return
Jobs.update(
job=job,
status=JobStatus.RUNNING,
status_text="Rebuilding the system...",
)
# Wait for the systemd unit to finish
try:
wait_until_true(
lambda: check_running_status(job, unit_name),
timeout_sec=RUN_TIMEOUT,
interval=RUN_INTERVAL,
)
except TimeoutError:
log_lines = get_last_log_lines(unit_name, 10)
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="System rebuild timed out. Last log lines:\n"
+ "\n".join(log_lines),
)
return
except subprocess.CalledProcessError as e:
Jobs.update(
job=job,
status=JobStatus.ERROR,
status_text=str(e),
)
@huey.task()
def rebuild_system_task(job: Job, upgrade: bool = False):
"""Rebuild the system"""
rebuild_system(job, upgrade)
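The polling helper `wait_until_true` is imported from `selfprivacy_api.utils.waitloop` and is not part of this diff; a plausible minimal implementation with the same call signature, shown here only for context, might look like:

```python
import time
from typing import Callable


def wait_until_true(
    predicate: Callable[[], bool], timeout_sec: int = 60, interval: float = 1.0
) -> None:
    """Poll `predicate` every `interval` seconds, raising TimeoutError on timeout."""
    deadline = time.monotonic() + timeout_sec
    while not predicate():
        if time.monotonic() >= deadline:
            raise TimeoutError("Condition was not met in time")
        time.sleep(interval)
```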

View File

@ -8,16 +8,35 @@ at api.skippedMigrations in userdata.json and populating it
with IDs of the migrations to skip.
Adding DISABLE_ALL to that array disables the migrations module entirely.
"""
from selfprivacy_api.utils import ReadUserData, UserDataFiles
from selfprivacy_api.migrations.write_token_to_redis import WriteTokenToRedis
from selfprivacy_api.migrations.check_for_system_rebuild_jobs import (
CheckForSystemRebuildJobs,
from selfprivacy_api.migrations.check_for_failed_binds_migration import (
CheckForFailedBindsMigration,
)
from selfprivacy_api.utils import ReadUserData
from selfprivacy_api.migrations.fix_nixos_config_branch import FixNixosConfigBranch
from selfprivacy_api.migrations.create_tokens_json import CreateTokensJson
from selfprivacy_api.migrations.migrate_to_selfprivacy_channel import (
MigrateToSelfprivacyChannel,
)
from selfprivacy_api.migrations.mount_volume import MountVolume
from selfprivacy_api.migrations.providers import CreateProviderFields
from selfprivacy_api.migrations.prepare_for_nixos_2211 import (
MigrateToSelfprivacyChannelFrom2205,
)
from selfprivacy_api.migrations.prepare_for_nixos_2305 import (
MigrateToSelfprivacyChannelFrom2211,
)
from selfprivacy_api.migrations.redis_tokens import LoadTokensToRedis
migrations = [
WriteTokenToRedis(),
CheckForSystemRebuildJobs(),
FixNixosConfigBranch(),
CreateTokensJson(),
MigrateToSelfprivacyChannel(),
MountVolume(),
CheckForFailedBindsMigration(),
CreateProviderFields(),
MigrateToSelfprivacyChannelFrom2205(),
MigrateToSelfprivacyChannelFrom2211(),
LoadTokensToRedis(),
]
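As the module docstring above describes, individual migrations can be skipped by listing their IDs under `api.skippedMigrations` in userdata.json, and `DISABLE_ALL` turns the module off entirely. An illustrative excerpt, written as a Python literal, with migration IDs taken from the classes in this diff:

```python
# Illustrative userdata.json content for skipping specific migrations.
userdata_excerpt = {
    "api": {
        "skippedMigrations": ["mount_volume", "create_tokens_json"],
        # or: "skippedMigrations": ["DISABLE_ALL"]  # disable the module entirely
    }
}
```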
@ -26,7 +45,7 @@ def run_migrations():
Go over all migrations. If they are not skipped in userdata file, run them
if the migration needed.
"""
with ReadUserData(UserDataFiles.SECRETS) as data:
with ReadUserData() as data:
if "api" not in data:
skipped_migrations = []
elif "skippedMigrations" not in data["api"]:

View File

@ -0,0 +1,48 @@
from selfprivacy_api.jobs import JobStatus, Jobs
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.utils import WriteUserData
class CheckForFailedBindsMigration(Migration):
"""Mount volume."""
def get_migration_name(self):
return "check_for_failed_binds_migration"
def get_migration_description(self):
return "If binds migration failed, try again."
def is_migration_needed(self):
try:
jobs = Jobs.get_jobs()
# If there is a job with type_id "migrations.migrate_to_binds" and status is not "FINISHED",
# then migration is needed and job is deleted
for job in jobs:
if (
job.type_id == "migrations.migrate_to_binds"
and job.status != JobStatus.FINISHED
):
return True
return False
except Exception as e:
print(e)
return False
def migrate(self):
# Find the unfinished binds migration job, remove it
# and reset the useBinds flag in userdata.json
try:
jobs = Jobs.get_jobs()
for job in jobs:
if (
job.type_id == "migrations.migrate_to_binds"
and job.status != JobStatus.FINISHED
):
Jobs.remove(job)
with WriteUserData() as userdata:
userdata["useBinds"] = False
print("Done")
except Exception as e:
print(e)
print("Error mounting volume")

View File

@ -1,47 +0,0 @@
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.jobs import JobStatus, Jobs
class CheckForSystemRebuildJobs(Migration):
"""Check if there are unfinished system rebuild jobs and finish them"""
def get_migration_name(self):
return "check_for_system_rebuild_jobs"
def get_migration_description(self):
return "Check if there are unfinished system rebuild jobs and finish them"
def is_migration_needed(self):
# Check if there are any unfinished system rebuild jobs
for job in Jobs.get_jobs():
if (
job.type_id
in [
"system.nixos.rebuild",
"system.nixos.upgrade",
]
) and job.status in [
JobStatus.CREATED,
JobStatus.RUNNING,
]:
return True
def migrate(self):
# As the API is restarted, we assume that the jobs are finished
for job in Jobs.get_jobs():
if (
job.type_id
in [
"system.nixos.rebuild",
"system.nixos.upgrade",
]
) and job.status in [
JobStatus.CREATED,
JobStatus.RUNNING,
]:
Jobs.update(
job=job,
status=JobStatus.FINISHED,
result="System rebuilt.",
progress=100,
)

View File

@ -0,0 +1,58 @@
from datetime import datetime
import os
import json
from pathlib import Path
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.utils import TOKENS_FILE, ReadUserData
class CreateTokensJson(Migration):
def get_migration_name(self):
return "create_tokens_json"
def get_migration_description(self):
return """Selfprivacy API used a single token in userdata.json for authentication.
This migration creates a new tokens.json file with the old token in it.
This migration runs if the tokens.json file does not exist.
Old token is located at ["api"]["token"] in userdata.json.
tokens.json path is declared in TOKENS_FILE imported from utils.py
tokens.json must have the following format:
{
"tokens": [
{
"token": "token_string",
"name": "Master Token",
"date": "current date from str(datetime.now())",
}
]
}
tokens.json must have 0600 permissions.
"""
def is_migration_needed(self):
return not os.path.exists(TOKENS_FILE)
def migrate(self):
try:
print(f"Creating tokens.json file at {TOKENS_FILE}")
with ReadUserData() as userdata:
token = userdata["api"]["token"]
# Touch tokens.json with 0600 permissions
Path(TOKENS_FILE).touch(mode=0o600)
# Write token to tokens.json
structure = {
"tokens": [
{
"token": token,
"name": "primary_token",
"date": str(datetime.now()),
}
]
}
with open(TOKENS_FILE, "w", encoding="utf-8") as tokens:
json.dump(structure, tokens, indent=4)
print("Done")
except Exception as e:
print(e)
print("Error creating tokens.json")

View File

@ -0,0 +1,57 @@
import os
import subprocess
from selfprivacy_api.migrations.migration import Migration
class FixNixosConfigBranch(Migration):
def get_migration_name(self):
return "fix_nixos_config_branch"
def get_migration_description(self):
return """Mobile SelfPrivacy app introduced a bug in version 0.4.0.
New servers were initialized with a rolling-testing nixos config branch.
This was fixed in app version 0.4.2, but existing servers were not updated.
This migration fixes this by changing the nixos config branch to master.
"""
def is_migration_needed(self):
"""Check the current branch of /etc/nixos and return True if it is rolling-testing"""
current_working_directory = os.getcwd()
try:
os.chdir("/etc/nixos")
nixos_config_branch = subprocess.check_output(
["git", "rev-parse", "--abbrev-ref", "HEAD"], start_new_session=True
)
os.chdir(current_working_directory)
return nixos_config_branch.decode("utf-8").strip() == "rolling-testing"
except subprocess.CalledProcessError:
os.chdir(current_working_directory)
return False
def migrate(self):
"""Affected server pulled the config with the --single-branch flag.
Git config remote.origin.fetch has to be changed, so all branches will be fetched.
Then, fetch all branches, pull and switch to master branch.
"""
print("Fixing Nixos config branch")
current_working_directory = os.getcwd()
try:
os.chdir("/etc/nixos")
subprocess.check_output(
[
"git",
"config",
"remote.origin.fetch",
"+refs/heads/*:refs/remotes/origin/*",
]
)
subprocess.check_output(["git", "fetch", "--all"])
subprocess.check_output(["git", "pull"])
subprocess.check_output(["git", "checkout", "master"])
os.chdir(current_working_directory)
print("Done")
except subprocess.CalledProcessError:
os.chdir(current_working_directory)
print("Error")

View File

@ -0,0 +1,49 @@
import os
import subprocess
from selfprivacy_api.migrations.migration import Migration
class MigrateToSelfprivacyChannel(Migration):
"""Migrate to selfprivacy Nix channel."""
def get_migration_name(self):
return "migrate_to_selfprivacy_channel"
def get_migration_description(self):
return "Migrate to selfprivacy Nix channel."
def is_migration_needed(self):
try:
output = subprocess.check_output(
["nix-channel", "--list"], start_new_session=True
)
output = output.decode("utf-8")
first_line = output.split("\n", maxsplit=1)[0]
return first_line.startswith("nixos") and (
first_line.endswith("nixos-21.11") or first_line.endswith("nixos-21.05")
)
except subprocess.CalledProcessError:
return False
def migrate(self):
# Change the channel and update them.
# Also, go to /etc/nixos directory and make a git pull
current_working_directory = os.getcwd()
try:
print("Changing channel")
os.chdir("/etc/nixos")
subprocess.check_output(
[
"nix-channel",
"--add",
"https://channel.selfprivacy.org/nixos-selfpricacy",
"nixos",
]
)
subprocess.check_output(["nix-channel", "--update"])
subprocess.check_output(["git", "pull"])
os.chdir(current_working_directory)
except subprocess.CalledProcessError:
os.chdir(current_working_directory)
print("Error")

View File

@ -0,0 +1,51 @@
import os
import subprocess
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.utils.block_devices import BlockDevices
class MountVolume(Migration):
"""Mount volume."""
def get_migration_name(self):
return "mount_volume"
def get_migration_description(self):
return "Mount volume if it is not mounted."
def is_migration_needed(self):
try:
with ReadUserData() as userdata:
return "volumes" not in userdata
except Exception as e:
print(e)
return False
def migrate(self):
# Get info about existing volumes
# Write info about volumes to userdata.json
try:
volumes = BlockDevices().get_block_devices()
# If there is an unmounted volume sdb,
# Write it to userdata.json
is_there_a_volume = False
for volume in volumes:
if volume.name == "sdb":
is_there_a_volume = True
break
with WriteUserData() as userdata:
userdata["volumes"] = []
if is_there_a_volume:
userdata["volumes"].append(
{
"device": "/dev/sdb",
"mountPoint": "/volumes/sdb",
"fsType": "ext4",
}
)
print("Done")
except Exception as e:
print(e)
print("Error mounting volume")

View File

@ -0,0 +1,58 @@
import os
import subprocess
from selfprivacy_api.migrations.migration import Migration
class MigrateToSelfprivacyChannelFrom2205(Migration):
"""Migrate to selfprivacy Nix channel.
For some reason, NixOS 22.05 servers were initialized with the nixos channel instead of the selfprivacy one.
This prevents us from upgrading to NixOS 22.11.
"""
def get_migration_name(self):
return "migrate_to_selfprivacy_channel_from_2205"
def get_migration_description(self):
return "Migrate to selfprivacy Nix channel from NixOS 22.05."
def is_migration_needed(self):
try:
output = subprocess.check_output(
["nix-channel", "--list"], start_new_session=True
)
output = output.decode("utf-8")
first_line = output.split("\n", maxsplit=1)[0]
return first_line.startswith("nixos") and (
first_line.endswith("nixos-22.05")
)
except subprocess.CalledProcessError:
return False
def migrate(self):
# Change the channel and update them.
# Also, go to /etc/nixos directory and make a git pull
current_working_directory = os.getcwd()
try:
print("Changing channel")
os.chdir("/etc/nixos")
subprocess.check_output(
[
"nix-channel",
"--add",
"https://channel.selfprivacy.org/nixos-selfpricacy",
"nixos",
]
)
subprocess.check_output(["nix-channel", "--update"])
nixos_config_branch = subprocess.check_output(
["git", "rev-parse", "--abbrev-ref", "HEAD"], start_new_session=True
)
if nixos_config_branch.decode("utf-8").strip() == "api-redis":
print("Also changing nixos-config branch from api-redis to master")
subprocess.check_output(["git", "checkout", "master"])
subprocess.check_output(["git", "pull"])
os.chdir(current_working_directory)
except subprocess.CalledProcessError:
os.chdir(current_working_directory)
print("Error")

View File

@ -0,0 +1,58 @@
import os
import subprocess
from selfprivacy_api.migrations.migration import Migration
class MigrateToSelfprivacyChannelFrom2211(Migration):
"""Migrate to selfprivacy Nix channel.
For some reason, NixOS 22.11 servers were initialized with the nixos channel instead of the selfprivacy one.
This prevents us from upgrading to NixOS 23.05.
"""
def get_migration_name(self):
return "migrate_to_selfprivacy_channel_from_2211"
def get_migration_description(self):
return "Migrate to selfprivacy Nix channel from NixOS 22.11."
def is_migration_needed(self):
try:
output = subprocess.check_output(
["nix-channel", "--list"], start_new_session=True
)
output = output.decode("utf-8")
first_line = output.split("\n", maxsplit=1)[0]
return first_line.startswith("nixos") and (
first_line.endswith("nixos-22.11")
)
except subprocess.CalledProcessError:
return False
def migrate(self):
# Change the channel and update them.
# Also, go to /etc/nixos directory and make a git pull
current_working_directory = os.getcwd()
try:
print("Changing channel")
os.chdir("/etc/nixos")
subprocess.check_output(
[
"nix-channel",
"--add",
"https://channel.selfprivacy.org/nixos-selfpricacy",
"nixos",
]
)
subprocess.check_output(["nix-channel", "--update"])
nixos_config_branch = subprocess.check_output(
["git", "rev-parse", "--abbrev-ref", "HEAD"], start_new_session=True
)
if nixos_config_branch.decode("utf-8").strip() == "api-redis":
print("Also changing nixos-config branch from api-redis to master")
subprocess.check_output(["git", "checkout", "master"])
subprocess.check_output(["git", "pull"])
os.chdir(current_working_directory)
except subprocess.CalledProcessError:
os.chdir(current_working_directory)
print("Error")

View File

@ -0,0 +1,43 @@
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.utils import ReadUserData, WriteUserData
class CreateProviderFields(Migration):
"""Unhardcode providers"""
def get_migration_name(self):
return "create_provider_fields"
def get_migration_description(self):
return "Add DNS, backup and server provider fields to enable user to choose between different clouds and to make the deployment adapt to these preferences."
def is_migration_needed(self):
try:
with ReadUserData() as userdata:
return "dns" not in userdata
except Exception as e:
print(e)
return False
def migrate(self):
# Write info about providers to userdata.json
try:
with WriteUserData() as userdata:
userdata["dns"] = {
"provider": "CLOUDFLARE",
"apiKey": userdata["cloudflare"]["apiKey"],
}
userdata["server"] = {
"provider": "HETZNER",
}
userdata["backup"] = {
"provider": "BACKBLAZE",
"accountId": userdata["backblaze"]["accountId"],
"accountKey": userdata["backblaze"]["accountKey"],
"bucket": userdata["backblaze"]["bucket"],
}
print("Done")
except Exception as e:
print(e)
print("Error migrating provider fields")

View File

@ -0,0 +1,48 @@
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.repositories.tokens.json_tokens_repository import (
JsonTokensRepository,
)
from selfprivacy_api.repositories.tokens.redis_tokens_repository import (
RedisTokensRepository,
)
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
AbstractTokensRepository,
)
class LoadTokensToRedis(Migration):
"""Load Json tokens into Redis"""
def get_migration_name(self):
return "load_tokens_to_redis"
def get_migration_description(self):
return "Loads access tokens and recovery keys from legacy json file into redis token storage"
def is_repo_empty(self, repo: AbstractTokensRepository) -> bool:
if repo.get_tokens() != []:
return False
if repo.get_recovery_key() is not None:
return False
return True
def is_migration_needed(self):
try:
if not self.is_repo_empty(JsonTokensRepository()) and self.is_repo_empty(
RedisTokensRepository()
):
return True
except Exception as e:
print(e)
return False
def migrate(self):
# Copy tokens and the recovery key from the json file into redis
try:
RedisTokensRepository().clone(JsonTokensRepository())
print("Done")
except Exception as e:
print(e)
print("Error migrating access tokens from json to redis")

View File

@ -1,63 +0,0 @@
from datetime import datetime
from typing import Optional
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.models.tokens.token import Token
from selfprivacy_api.repositories.tokens.redis_tokens_repository import (
RedisTokensRepository,
)
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
AbstractTokensRepository,
)
from selfprivacy_api.utils import ReadUserData, UserDataFiles
class WriteTokenToRedis(Migration):
"""Load Json tokens into Redis"""
def get_migration_name(self):
return "write_token_to_redis"
def get_migration_description(self):
return "Loads the initial token into redis token storage"
def is_repo_empty(self, repo: AbstractTokensRepository) -> bool:
if repo.get_tokens() != []:
return False
return True
def get_token_from_json(self) -> Optional[Token]:
try:
with ReadUserData(UserDataFiles.SECRETS) as userdata:
return Token(
token=userdata["api"]["token"],
device_name="Initial device",
created_at=datetime.now(),
)
except Exception as e:
print(e)
return None
def is_migration_needed(self):
try:
if self.get_token_from_json() is not None and self.is_repo_empty(
RedisTokensRepository()
):
return True
except Exception as e:
print(e)
return False
def migrate(self):
# Load the initial token from secrets.json into redis
try:
token = self.get_token_from_json()
if token is None:
print("No token found in secrets.json")
return
RedisTokensRepository()._store_token(token)
print("Done")
except Exception as e:
print(e)
print("Error migrating access tokens from json to redis")

View File

@ -1,24 +0,0 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel
class ServiceStatus(Enum):
"""Enum for service status"""
ACTIVE = "ACTIVE"
RELOADING = "RELOADING"
INACTIVE = "INACTIVE"
FAILED = "FAILED"
ACTIVATING = "ACTIVATING"
DEACTIVATING = "DEACTIVATING"
OFF = "OFF"
class ServiceDnsRecord(BaseModel):
type: str
name: str
content: str
ttl: int
display_name: str
priority: Optional[int] = None

View File

@ -0,0 +1,8 @@
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
AbstractTokensRepository,
)
from selfprivacy_api.repositories.tokens.json_tokens_repository import (
JsonTokensRepository,
)
repository = JsonTokensRepository()

View File

@ -0,0 +1,153 @@
"""
temporary legacy
"""
from typing import Optional
from datetime import datetime, timezone
from selfprivacy_api.utils import UserDataFiles, WriteUserData, ReadUserData
from selfprivacy_api.models.tokens.token import Token
from selfprivacy_api.models.tokens.recovery_key import RecoveryKey
from selfprivacy_api.models.tokens.new_device_key import NewDeviceKey
from selfprivacy_api.repositories.tokens.exceptions import (
TokenNotFound,
)
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
AbstractTokensRepository,
)
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
class JsonTokensRepository(AbstractTokensRepository):
def get_tokens(self) -> list[Token]:
"""Get the tokens"""
tokens_list = []
with ReadUserData(UserDataFiles.TOKENS) as tokens_file:
for userdata_token in tokens_file["tokens"]:
tokens_list.append(
Token(
token=userdata_token["token"],
device_name=userdata_token["name"],
created_at=userdata_token["date"],
)
)
return tokens_list
def _store_token(self, new_token: Token):
"""Store a token directly"""
with WriteUserData(UserDataFiles.TOKENS) as tokens_file:
tokens_file["tokens"].append(
{
"token": new_token.token,
"name": new_token.device_name,
"date": new_token.created_at.strftime(DATETIME_FORMAT),
}
)
def delete_token(self, input_token: Token) -> None:
"""Delete the token"""
with WriteUserData(UserDataFiles.TOKENS) as tokens_file:
for userdata_token in tokens_file["tokens"]:
if userdata_token["token"] == input_token.token:
tokens_file["tokens"].remove(userdata_token)
return
raise TokenNotFound("Token not found!")
def __key_date_from_str(self, date_string: str) -> datetime:
if date_string is None or date_string == "":
return None
# we assume that we store dates in json as naive utc
utc_no_tz = datetime.fromisoformat(date_string)
utc_with_tz = utc_no_tz.replace(tzinfo=timezone.utc)
return utc_with_tz
def __date_from_tokens_file(
self, tokens_file: object, tokenfield: str, datefield: str
):
date_string = tokens_file[tokenfield].get(datefield)
return self.__key_date_from_str(date_string)
def get_recovery_key(self) -> Optional[RecoveryKey]:
"""Get the recovery key"""
with ReadUserData(UserDataFiles.TOKENS) as tokens_file:
if (
"recovery_token" not in tokens_file
or tokens_file["recovery_token"] is None
):
return
recovery_key = RecoveryKey(
key=tokens_file["recovery_token"].get("token"),
created_at=self.__date_from_tokens_file(
tokens_file, "recovery_token", "date"
),
expires_at=self.__date_from_tokens_file(
tokens_file, "recovery_token", "expiration"
),
uses_left=tokens_file["recovery_token"].get("uses_left"),
)
return recovery_key
def _store_recovery_key(self, recovery_key: RecoveryKey) -> None:
with WriteUserData(UserDataFiles.TOKENS) as tokens_file:
key_expiration: Optional[str] = None
if recovery_key.expires_at is not None:
key_expiration = recovery_key.expires_at.strftime(DATETIME_FORMAT)
tokens_file["recovery_token"] = {
"token": recovery_key.key,
"date": recovery_key.created_at.strftime(DATETIME_FORMAT),
"expiration": key_expiration,
"uses_left": recovery_key.uses_left,
}
def _decrement_recovery_token(self):
"""Decrement recovery key use count by one"""
if self.is_recovery_key_valid():
with WriteUserData(UserDataFiles.TOKENS) as tokens:
if tokens["recovery_token"]["uses_left"] is not None:
tokens["recovery_token"]["uses_left"] -= 1
def _delete_recovery_key(self) -> None:
"""Delete the recovery key"""
with WriteUserData(UserDataFiles.TOKENS) as tokens_file:
if "recovery_token" in tokens_file:
del tokens_file["recovery_token"]
return
def _store_new_device_key(self, new_device_key: NewDeviceKey) -> None:
with WriteUserData(UserDataFiles.TOKENS) as tokens_file:
tokens_file["new_device"] = {
"token": new_device_key.key,
"date": new_device_key.created_at.strftime(DATETIME_FORMAT),
"expiration": new_device_key.expires_at.strftime(DATETIME_FORMAT),
}
def delete_new_device_key(self) -> None:
"""Delete the new device key"""
with WriteUserData(UserDataFiles.TOKENS) as tokens_file:
if "new_device" in tokens_file:
del tokens_file["new_device"]
return
def _get_stored_new_device_key(self) -> Optional[NewDeviceKey]:
"""Retrieves new device key that is already stored."""
with ReadUserData(UserDataFiles.TOKENS) as tokens_file:
if "new_device" not in tokens_file or tokens_file["new_device"] is None:
return
new_device_key = NewDeviceKey(
key=tokens_file["new_device"]["token"],
created_at=self.__date_from_tokens_file(
tokens_file, "new_device", "date"
),
expires_at=self.__date_from_tokens_file(
tokens_file, "new_device", "expiration"
),
)
return new_device_key
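To make the naive-UTC date convention above concrete, here is a small round-trip sketch of how a timestamp written with `DATETIME_FORMAT` is later read back by `__key_date_from_str` (the value is illustrative):

```python
from datetime import datetime, timezone

DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"

# Write path: a naive UTC timestamp, serialized the way _store_token() does.
stored = datetime(2023, 5, 1, 12, 0, 0, 123456).strftime(DATETIME_FORMAT)
assert stored == "2023-05-01T12:00:00.123456"

# Read path: parse as naive, then pin the timezone to UTC.
parsed = datetime.fromisoformat(stored).replace(tzinfo=timezone.utc)
assert parsed.tzinfo is timezone.utc
```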

View File

@ -30,7 +30,7 @@ class RedisTokensRepository(AbstractTokensRepository):
@staticmethod
def token_key_for_device(device_name: str):
md5_hash = md5(usedforsecurity=False)
md5_hash = md5()
md5_hash.update(bytes(device_name, "utf-8"))
digest = md5_hash.hexdigest()
return TOKENS_PREFIX + digest
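A quick sketch of the key derivation above; the `TOKENS_PREFIX` value is not shown in this hunk and is assumed here:

```python
from hashlib import md5

TOKENS_PREFIX = "token_repo:tokens:"  # assumed value, not visible in this diff

md5_hash = md5()  # one side of the diff also passes usedforsecurity=False
md5_hash.update(bytes("my-phone", "utf-8"))
redis_key = TOKENS_PREFIX + md5_hash.hexdigest()
# e.g. token_repo:tokens:<32-character hex digest of the device name>
```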

View File

@ -0,0 +1,125 @@
from datetime import datetime
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from selfprivacy_api.actions.api_tokens import (
CannotDeleteCallerException,
InvalidExpirationDate,
InvalidUsesLeft,
NotFoundException,
delete_api_token,
refresh_api_token,
get_api_recovery_token_status,
get_api_tokens_with_caller_flag,
get_new_api_recovery_key,
use_mnemonic_recovery_token,
delete_new_device_auth_token,
get_new_device_auth_token,
use_new_device_auth_token,
)
from selfprivacy_api.dependencies import TokenHeader, get_token_header
router = APIRouter(
prefix="/auth",
tags=["auth"],
responses={404: {"description": "Not found"}},
)
@router.get("/tokens")
async def rest_get_tokens(auth_token: TokenHeader = Depends(get_token_header)):
"""Get the tokens info"""
return get_api_tokens_with_caller_flag(auth_token.token)
class DeleteTokenInput(BaseModel):
"""Delete token input"""
token_name: str
@router.delete("/tokens")
async def rest_delete_tokens(
token: DeleteTokenInput, auth_token: TokenHeader = Depends(get_token_header)
):
"""Delete the tokens"""
try:
delete_api_token(auth_token.token, token.token_name)
except NotFoundException:
raise HTTPException(status_code=404, detail="Token not found")
except CannotDeleteCallerException:
raise HTTPException(status_code=400, detail="Cannot delete caller's token")
return {"message": "Token deleted"}
@router.post("/tokens")
async def rest_refresh_token(auth_token: TokenHeader = Depends(get_token_header)):
"""Refresh the token"""
try:
new_token = refresh_api_token(auth_token.token)
except NotFoundException:
raise HTTPException(status_code=404, detail="Token not found")
return {"token": new_token}
@router.get("/recovery_token")
async def rest_get_recovery_token_status(
auth_token: TokenHeader = Depends(get_token_header),
):
return get_api_recovery_token_status()
class CreateRecoveryTokenInput(BaseModel):
expiration: Optional[datetime] = None
uses: Optional[int] = None
@router.post("/recovery_token")
async def rest_create_recovery_token(
limits: CreateRecoveryTokenInput = CreateRecoveryTokenInput(),
auth_token: TokenHeader = Depends(get_token_header),
):
try:
token = get_new_api_recovery_key(limits.expiration, limits.uses)
except InvalidExpirationDate as e:
raise HTTPException(status_code=400, detail=str(e))
except InvalidUsesLeft as e:
raise HTTPException(status_code=400, detail=str(e))
return {"token": token}
class UseTokenInput(BaseModel):
token: str
device: str
@router.post("/recovery_token/use")
async def rest_use_recovery_token(input: UseTokenInput):
token = use_mnemonic_recovery_token(input.token, input.device)
if token is None:
raise HTTPException(status_code=404, detail="Token not found")
return {"token": token}
@router.post("/new_device")
async def rest_new_device(auth_token: TokenHeader = Depends(get_token_header)):
token = get_new_device_auth_token()
return {"token": token}
@router.delete("/new_device")
async def rest_delete_new_device_token(
auth_token: TokenHeader = Depends(get_token_header),
):
delete_new_device_auth_token()
return {"token": None}
@router.post("/new_device/authorize")
async def rest_new_device_authorize(input: UseTokenInput):
token = use_new_device_auth_token(input.token, input.device)
if token is None:
raise HTTPException(status_code=404, detail="Token not found")
return {"message": "Device authorized", "token": token}

View File

@ -0,0 +1,336 @@
"""Basic services legacy api"""
import base64
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from selfprivacy_api.actions.ssh import (
InvalidPublicKey,
KeyAlreadyExists,
KeyNotFound,
create_ssh_key,
enable_ssh,
get_ssh_settings,
remove_ssh_key,
set_ssh_settings,
)
from selfprivacy_api.actions.users import UserNotFound, get_user_by_username
from selfprivacy_api.dependencies import get_token_header
from selfprivacy_api.services.bitwarden import Bitwarden
from selfprivacy_api.services.gitea import Gitea
from selfprivacy_api.services.mailserver import MailServer
from selfprivacy_api.services.nextcloud import Nextcloud
from selfprivacy_api.services.ocserv import Ocserv
from selfprivacy_api.services.pleroma import Pleroma
from selfprivacy_api.services.service import ServiceStatus
from selfprivacy_api.utils import get_dkim_key, get_domain
router = APIRouter(
prefix="/services",
tags=["services"],
dependencies=[Depends(get_token_header)],
responses={404: {"description": "Not found"}},
)
def service_status_to_return_code(status: ServiceStatus):
"""Converts service status object to return code for
compatibility with legacy api"""
if status == ServiceStatus.ACTIVE:
return 0
elif status == ServiceStatus.FAILED:
return 1
elif status == ServiceStatus.INACTIVE:
return 3
elif status == ServiceStatus.OFF:
return 4
else:
return 2
@router.get("/status")
async def get_status():
"""Get the status of the services"""
mail_status = MailServer.get_status()
bitwarden_status = Bitwarden.get_status()
gitea_status = Gitea.get_status()
nextcloud_status = Nextcloud.get_status()
ocserv_status = Ocserv.get_status()
pleroma_status = Pleroma.get_status()
return {
"imap": service_status_to_return_code(mail_status),
"smtp": service_status_to_return_code(mail_status),
"http": 0,
"bitwarden": service_status_to_return_code(bitwarden_status),
"gitea": service_status_to_return_code(gitea_status),
"nextcloud": service_status_to_return_code(nextcloud_status),
"ocserv": service_status_to_return_code(ocserv_stauts),
"pleroma": service_status_to_return_code(pleroma_status),
}
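For illustration, the shape of the legacy /services/status payload produced by the handler above, assuming every service is active except Ocserv, which is stopped:

```python
{
    "imap": 0,
    "smtp": 0,
    "http": 0,
    "bitwarden": 0,
    "gitea": 0,
    "nextcloud": 0,
    "ocserv": 3,  # ServiceStatus.INACTIVE maps to 3
    "pleroma": 0,
}
```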
@router.post("/bitwarden/enable")
async def enable_bitwarden():
"""Enable Bitwarden"""
Bitwarden.enable()
return {
"status": 0,
"message": "Bitwarden enabled",
}
@router.post("/bitwarden/disable")
async def disable_bitwarden():
"""Disable Bitwarden"""
Bitwarden.disable()
return {
"status": 0,
"message": "Bitwarden disabled",
}
@router.post("/gitea/enable")
async def enable_gitea():
"""Enable Gitea"""
Gitea.enable()
return {
"status": 0,
"message": "Gitea enabled",
}
@router.post("/gitea/disable")
async def disable_gitea():
"""Disable Gitea"""
Gitea.disable()
return {
"status": 0,
"message": "Gitea disabled",
}
@router.get("/mailserver/dkim")
async def get_mailserver_dkim():
"""Get the DKIM record for the mailserver"""
domain = get_domain()
dkim = get_dkim_key(domain, parse=False)
if dkim is None:
raise HTTPException(status_code=404, detail="DKIM record not found")
dkim = base64.b64encode(dkim.encode("utf-8")).decode("utf-8")
return dkim
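A client-side sketch for the DKIM endpoint above: the TXT record comes back base64-wrapped and must be decoded by the caller (the base URL and auth header are assumptions):

```python
import base64
import requests

encoded = requests.get(
    "http://localhost:5050/services/mailserver/dkim",   # assumed base URL
    headers={"Authorization": "Bearer <token>"},
).json()
print(base64.b64decode(encoded).decode("utf-8"))
```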
@router.post("/nextcloud/enable")
async def enable_nextcloud():
"""Enable Nextcloud"""
Nextcloud.enable()
return {
"status": 0,
"message": "Nextcloud enabled",
}
@router.post("/nextcloud/disable")
async def disable_nextcloud():
"""Disable Nextcloud"""
Nextcloud.disable()
return {
"status": 0,
"message": "Nextcloud disabled",
}
@router.post("/ocserv/enable")
async def enable_ocserv():
"""Enable Ocserv"""
Ocserv.enable()
return {
"status": 0,
"message": "Ocserv enabled",
}
@router.post("/ocserv/disable")
async def disable_ocserv():
"""Disable Ocserv"""
Ocserv.disable()
return {
"status": 0,
"message": "Ocserv disabled",
}
@router.post("/pleroma/enable")
async def enable_pleroma():
"""Enable Pleroma"""
Pleroma.enable()
return {
"status": 0,
"message": "Pleroma enabled",
}
@router.post("/pleroma/disable")
async def disable_pleroma():
"""Disable Pleroma"""
Pleroma.disable()
return {
"status": 0,
"message": "Pleroma disabled",
}
@router.get("/restic/backup/list")
async def get_restic_backup_list():
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
@router.put("/restic/backup/create")
async def create_restic_backup():
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
@router.get("/restic/backup/status")
async def get_restic_backup_status():
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
@router.get("/restic/backup/reload")
async def reload_restic_backup():
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
class BackupRestoreInput(BaseModel):
backupId: str
@router.put("/restic/backup/restore")
async def restore_restic_backup(backup: BackupRestoreInput):
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
class BackupConfigInput(BaseModel):
accountId: str
accountKey: str
bucket: str
@router.put("/restic/backblaze/config")
async def set_backblaze_config(backup_config: BackupConfigInput):
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
@router.post("/ssh/enable")
async def rest_enable_ssh():
"""Enable SSH"""
enable_ssh()
return {
"status": 0,
"message": "SSH enabled",
}
@router.get("/ssh")
async def rest_get_ssh():
"""Get the SSH configuration"""
settings = get_ssh_settings()
return {
"enable": settings.enable,
"passwordAuthentication": settings.passwordAuthentication,
}
class SshConfigInput(BaseModel):
enable: Optional[bool] = None
passwordAuthentication: Optional[bool] = None
@router.put("/ssh")
async def rest_set_ssh(ssh_config: SshConfigInput):
"""Set the SSH configuration"""
set_ssh_settings(ssh_config.enable, ssh_config.passwordAuthentication)
return "SSH settings changed"
class SshKeyInput(BaseModel):
public_key: str
@router.put("/ssh/key/send", status_code=201)
async def rest_send_ssh_key(input: SshKeyInput):
"""Send the SSH key"""
try:
create_ssh_key("root", input.public_key)
except KeyAlreadyExists as error:
raise HTTPException(status_code=409, detail="Key already exists") from error
except InvalidPublicKey as error:
raise HTTPException(
status_code=400,
detail="Invalid key type. Only ssh-ed25519 and ssh-rsa are supported",
) from error
return {
"status": 0,
"message": "SSH key sent",
}
@router.get("/ssh/keys/{username}")
async def rest_get_ssh_keys(username: str):
"""Get the SSH keys for a user"""
user = get_user_by_username(username)
if user is None:
raise HTTPException(status_code=404, detail="User not found")
return user.ssh_keys
@router.post("/ssh/keys/{username}", status_code=201)
async def rest_add_ssh_key(username: str, input: SshKeyInput):
try:
create_ssh_key(username, input.public_key)
except KeyAlreadyExists as error:
raise HTTPException(status_code=409, detail="Key already exists") from error
except InvalidPublicKey as error:
raise HTTPException(
status_code=400,
detail="Invalid key type. Only ssh-ed25519 and ssh-rsa are supported",
) from error
except UserNotFound as error:
raise HTTPException(status_code=404, detail="User not found") from error
return {
"message": "New SSH key successfully written",
}
@router.delete("/ssh/keys/{username}")
async def rest_delete_ssh_key(username: str, input: SshKeyInput):
try:
remove_ssh_key(username, input.public_key)
except KeyNotFound as error:
raise HTTPException(status_code=404, detail="Key not found") from error
except UserNotFound as error:
raise HTTPException(status_code=404, detail="User not found") from error
return {"message": "SSH key deleted"}

View File

@ -0,0 +1,105 @@
from typing import Optional
from fastapi import APIRouter, Body, Depends, HTTPException
from pydantic import BaseModel
from selfprivacy_api.dependencies import get_token_header
import selfprivacy_api.actions.system as system_actions
router = APIRouter(
prefix="/system",
tags=["system"],
dependencies=[Depends(get_token_header)],
responses={404: {"description": "Not found"}},
)
@router.get("/configuration/timezone")
async def get_timezone():
"""Get the timezone of the server"""
return system_actions.get_timezone()
class ChangeTimezoneRequestBody(BaseModel):
"""Change the timezone of the server"""
timezone: str
@router.put("/configuration/timezone")
async def change_timezone(timezone: ChangeTimezoneRequestBody):
"""Change the timezone of the server"""
try:
system_actions.change_timezone(timezone.timezone)
except system_actions.InvalidTimezone as e:
raise HTTPException(status_code=400, detail=str(e))
return {"timezone": timezone.timezone}
@router.get("/configuration/autoUpgrade")
async def get_auto_upgrade_settings():
"""Get the auto-upgrade settings"""
return system_actions.get_auto_upgrade_settings().dict()
class AutoUpgradeSettings(BaseModel):
"""Settings for auto-upgrading user data"""
enable: Optional[bool] = None
allowReboot: Optional[bool] = None
@router.put("/configuration/autoUpgrade")
async def set_auto_upgrade_settings(settings: AutoUpgradeSettings):
"""Set the auto-upgrade settings"""
system_actions.set_auto_upgrade_settings(settings.enable, settings.allowReboot)
return "Auto-upgrade settings changed"
@router.get("/configuration/apply")
async def apply_configuration():
"""Apply the configuration"""
return_code = system_actions.rebuild_system()
return return_code
@router.get("/configuration/rollback")
async def rollback_configuration():
"""Rollback the configuration"""
return_code = system_actions.rollback_system()
return return_code
@router.get("/configuration/upgrade")
async def upgrade_configuration():
"""Upgrade the configuration"""
return_code = system_actions.upgrade_system()
return return_code
@router.get("/reboot")
async def reboot_system():
"""Reboot the system"""
system_actions.reboot_system()
return "System reboot has started"
@router.get("/version")
async def get_system_version():
"""Get the system version"""
return {"system_version": system_actions.get_system_version()}
@router.get("/pythonVersion")
async def get_python_version():
"""Get the Python version"""
return system_actions.get_python_version()
@router.get("/configuration/pull")
async def pull_configuration():
"""Pull the configuration"""
action_result = system_actions.pull_repository_changes()
if action_result.status == 0:
return action_result.dict()
raise HTTPException(status_code=500, detail=action_result.dict())

View File

@ -0,0 +1,62 @@
"""Users management module"""
from typing import Optional
from fastapi import APIRouter, Body, Depends, HTTPException
from pydantic import BaseModel
import selfprivacy_api.actions.users as users_actions
from selfprivacy_api.dependencies import get_token_header
router = APIRouter(
prefix="/users",
tags=["users"],
dependencies=[Depends(get_token_header)],
responses={404: {"description": "Not found"}},
)
@router.get("")
async def get_users(withMainUser: bool = False):
"""Get the list of users"""
users: list[users_actions.UserDataUser] = users_actions.get_users(
exclude_primary=not withMainUser, exclude_root=True
)
return [user.username for user in users]
class UserInput(BaseModel):
"""User input"""
username: str
password: str
@router.post("", status_code=201)
async def create_user(user: UserInput):
try:
users_actions.create_user(user.username, user.password)
except users_actions.PasswordIsEmpty as e:
raise HTTPException(status_code=400, detail=str(e))
except users_actions.UsernameForbidden as e:
raise HTTPException(status_code=409, detail=str(e))
except users_actions.UsernameNotAlphanumeric as e:
raise HTTPException(status_code=400, detail=str(e))
except users_actions.UsernameTooLong as e:
raise HTTPException(status_code=400, detail=str(e))
except users_actions.UserAlreadyExists as e:
raise HTTPException(status_code=409, detail=str(e))
return {"result": 0, "username": user.username}
@router.delete("/{username}")
async def delete_user(username: str):
try:
users_actions.delete_user(username)
except users_actions.UserNotFound as e:
raise HTTPException(status_code=404, detail=str(e))
except users_actions.UserIsProtected as e:
raise HTTPException(status_code=400, detail=str(e))
return {"result": 0, "username": username}

View File

@ -3,7 +3,7 @@
import typing
from selfprivacy_api.services.bitwarden import Bitwarden
from selfprivacy_api.services.gitea import Gitea
from selfprivacy_api.services.jitsimeet import JitsiMeet
from selfprivacy_api.services.jitsi import Jitsi
from selfprivacy_api.services.mailserver import MailServer
from selfprivacy_api.services.nextcloud import Nextcloud
from selfprivacy_api.services.pleroma import Pleroma
@ -18,7 +18,7 @@ services: list[Service] = [
Nextcloud(),
Pleroma(),
Ocserv(),
JitsiMeet(),
Jitsi(),
]
@ -54,20 +54,14 @@ def get_all_required_dns_records() -> list[ServiceDnsRecord]:
name="api",
content=ip4,
ttl=3600,
display_name="SelfPrivacy API",
),
ServiceDnsRecord(
type="AAAA",
name="api",
content=ip6,
ttl=3600,
),
]
if ip6 is not None:
dns_records.append(
ServiceDnsRecord(
type="AAAA",
name="api",
content=ip6,
ttl=3600,
display_name="SelfPrivacy API (IPv6)",
)
)
for service in get_enabled_services():
dns_records += service.get_dns_records(ip4, ip6)
dns_records += service.get_dns_records()
return dns_records
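An illustration of the conditional AAAA handling shown in the hunk above, using plain dicts as stand-ins for ServiceDnsRecord: when no IPv6 address is detected, the API's AAAA record is simply omitted (the addresses are fabricated):

```python
ip4, ip6 = "203.0.113.10", None  # fabricated addresses
dns_records = [{"type": "A", "name": "api", "content": ip4, "ttl": 3600}]
if ip6 is not None:  # the AAAA record is appended only when IPv6 exists
    dns_records.append(
        {"type": "AAAA", "name": "api", "content": ip6, "ttl": 3600}
    )
assert [record["type"] for record in dns_records] == ["A"]
```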

View File

@ -1,12 +1,15 @@
"""Class representing Bitwarden service"""
import base64
import subprocess
from typing import Optional, List
import typing
from selfprivacy_api.utils import get_domain
from selfprivacy_api.utils.systemd import get_service_status
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.bitwarden.icon import BITWARDEN_ICON
@ -38,15 +41,11 @@ class Bitwarden(Service):
return "vaultwarden"
@staticmethod
def get_url() -> Optional[str]:
def get_url() -> typing.Optional[str]:
"""Return service url."""
domain = get_domain()
return f"https://password.{domain}"
@staticmethod
def get_subdomain() -> Optional[str]:
return "password"
@staticmethod
def is_movable() -> bool:
return True
@ -59,6 +58,11 @@ class Bitwarden(Service):
def get_backup_description() -> str:
return "Password database, encryption certificate and attachments."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
return user_data.get("bitwarden", {}).get("enable", False)
@staticmethod
def get_status() -> ServiceStatus:
"""
@ -72,6 +76,22 @@ class Bitwarden(Service):
"""
return get_service_status("vaultwarden.service")
@staticmethod
def enable():
"""Enable Bitwarden service."""
with WriteUserData() as user_data:
if "bitwarden" not in user_data:
user_data["bitwarden"] = {}
user_data["bitwarden"]["enable"] = True
@staticmethod
def disable():
"""Disable Bitwarden service."""
with WriteUserData() as user_data:
if "bitwarden" not in user_data:
user_data["bitwarden"] = {}
user_data["bitwarden"]["enable"] = False
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "vaultwarden.service"])
@ -97,5 +117,40 @@ class Bitwarden(Service):
return ""
@staticmethod
def get_folders() -> List[str]:
def get_folders() -> typing.List[str]:
return ["/var/lib/bitwarden", "/var/lib/bitwarden_rs"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
"""Return list of DNS records for Bitwarden service."""
return [
ServiceDnsRecord(
type="A",
name="password",
content=network_utils.get_ip4(),
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="password",
content=network_utils.get_ip6(),
ttl=3600,
),
]
def move_to_volume(self, volume: BlockDevice) -> Job:
job = Jobs.add(
type_id="services.bitwarden.move",
name="Move Bitwarden",
description=f"Moving Bitwarden data to {volume.name}",
)
move_service(
self,
volume,
job,
FolderMoveNames.default_foldermoves(self),
"bitwarden",
)
return job

View File

@ -0,0 +1,258 @@
"""Generic handler for moving services"""
from __future__ import annotations
import subprocess
import time
import pathlib
import shutil
from pydantic import BaseModel
from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.services.owned_path import OwnedPath
class FolderMoveNames(BaseModel):
name: str
bind_location: str
owner: str
group: str
@staticmethod
def from_owned_path(path: OwnedPath) -> FolderMoveNames:
return FolderMoveNames(
name=FolderMoveNames.get_foldername(path.path),
bind_location=path.path,
owner=path.owner,
group=path.group,
)
@staticmethod
def get_foldername(path: str) -> str:
return path.split("/")[-1]
@staticmethod
def default_foldermoves(service: Service) -> list[FolderMoveNames]:
return [
FolderMoveNames.from_owned_path(folder)
for folder in service.get_owned_folders()
]
@huey.task()
def move_service(
service: Service,
volume: BlockDevice,
job: Job,
folder_names: list[FolderMoveNames],
userdata_location: str,
):
"""Move a service to another volume."""
job = Jobs.update(
job=job,
status_text="Performing pre-move checks...",
status=JobStatus.RUNNING,
)
service_name = service.get_display_name()
with ReadUserData() as user_data:
if not user_data.get("useBinds", False):
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="Server is not using binds.",
)
return
# Check if we are on the same volume
old_volume = service.get_drive()
if old_volume == volume.name:
Jobs.update(
job=job,
status=JobStatus.ERROR,
error=f"{service_name} is already on this volume.",
)
return
# Check if there is enough space on the new volume
if int(volume.fsavail) < service.get_storage_usage():
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="Not enough space on the new volume.",
)
return
# Make sure the volume is mounted
if not volume.is_root() and f"/volumes/{volume.name}" not in volume.mountpoints:
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="Volume is not mounted.",
)
return
# Make sure the current data directory exists and that its owner and group are correct
for folder in folder_names:
if not pathlib.Path(f"/volumes/{old_volume}/{folder.name}").exists():
Jobs.update(
job=job,
status=JobStatus.ERROR,
error=f"{service_name} is not found.",
)
return
if not pathlib.Path(f"/volumes/{old_volume}/{folder.name}").is_dir():
Jobs.update(
job=job,
status=JobStatus.ERROR,
error=f"{service_name} is not a directory.",
)
return
if (
not pathlib.Path(f"/volumes/{old_volume}/{folder.name}").owner()
== folder.owner
):
Jobs.update(
job=job,
status=JobStatus.ERROR,
error=f"{service_name} owner is not {folder.owner}.",
)
return
# Stop service
Jobs.update(
job=job,
status=JobStatus.RUNNING,
status_text=f"Stopping {service_name}...",
progress=5,
)
service.stop()
# Wait for the service to stop, check every second
# If it does not stop in 30 seconds, abort
for _ in range(30):
if service.get_status() not in (
ServiceStatus.ACTIVATING,
ServiceStatus.DEACTIVATING,
):
break
time.sleep(1)
else:
Jobs.update(
job=job,
status=JobStatus.ERROR,
error=f"{service_name} did not stop in 30 seconds.",
)
return
# Unmount old volume
Jobs.update(
job=job,
status_text="Unmounting old folder...",
status=JobStatus.RUNNING,
progress=10,
)
for folder in folder_names:
try:
subprocess.run(
["umount", folder.bind_location],
check=True,
)
except subprocess.CalledProcessError:
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="Unable to unmount old volume.",
)
return
# Move data to new volume and set correct permissions
Jobs.update(
job=job,
status_text="Moving data to new volume...",
status=JobStatus.RUNNING,
progress=20,
)
current_progress = 20
folder_percentage = 50 // len(folder_names)
for folder in folder_names:
shutil.move(
f"/volumes/{old_volume}/{folder.name}",
f"/volumes/{volume.name}/{folder.name}",
)
Jobs.update(
job=job,
status_text="Moving data to new volume...",
status=JobStatus.RUNNING,
progress=current_progress + folder_percentage,
)
Jobs.update(
job=job,
status_text=f"Making sure {service_name} owns its files...",
status=JobStatus.RUNNING,
progress=70,
)
for folder in folder_names:
try:
subprocess.run(
[
"chown",
"-R",
f"{folder.owner}:{folder.group}",
f"/volumes/{volume.name}/{folder.name}",
],
check=True,
)
except subprocess.CalledProcessError as error:
print(error.output)
Jobs.update(
job=job,
status=JobStatus.RUNNING,
error=f"Unable to set ownership of new volume. {service_name} may not be able to access its files. Continuing anyway.",
)
# Mount new volume
Jobs.update(
job=job,
status_text=f"Mounting {service_name} data...",
status=JobStatus.RUNNING,
progress=90,
)
for folder in folder_names:
try:
subprocess.run(
[
"mount",
"--bind",
f"/volumes/{volume.name}/{folder.name}",
folder.bind_location,
],
check=True,
)
except subprocess.CalledProcessError as error:
print(error.output)
Jobs.update(
job=job,
status=JobStatus.ERROR,
error="Unable to mount new volume.",
)
return
# Update userdata
Jobs.update(
job=job,
status_text="Finishing move...",
status=JobStatus.RUNNING,
progress=95,
)
with WriteUserData() as user_data:
if userdata_location not in user_data:
user_data[userdata_location] = {}
user_data[userdata_location]["location"] = volume.name
# Start service
service.start()
Jobs.update(
job=job,
status=JobStatus.FINISHED,
result=f"{service_name} moved successfully.",
status_text=f"Starting {service_name}...",
progress=100,
)

View File

@ -1,17 +1,16 @@
"""Generic service status fetcher using systemctl"""
import subprocess
from typing import List
from selfprivacy_api.models.services import ServiceStatus
from selfprivacy_api.services.service import ServiceStatus
def get_service_status(unit: str) -> ServiceStatus:
def get_service_status(service: str) -> ServiceStatus:
"""
Return service status from systemd.
Use systemctl show to get the status of a service.
Get ActiveState from the output.
"""
service_status = subprocess.check_output(["systemctl", "show", unit])
service_status = subprocess.check_output(["systemctl", "show", service])
if b"LoadState=not-found" in service_status:
return ServiceStatus.OFF
if b"ActiveState=active" in service_status:
@ -59,24 +58,3 @@ def get_service_status_from_several_units(services: list[str]) -> ServiceStatus:
if ServiceStatus.ACTIVE in service_statuses:
return ServiceStatus.ACTIVE
return ServiceStatus.OFF
def get_last_log_lines(service: str, lines_count: int) -> List[str]:
if lines_count < 1:
raise ValueError("lines_count must be greater than 0")
try:
logs = subprocess.check_output(
[
"journalctl",
"-u",
service,
"-n",
str(lines_count),
"-o",
"cat",
],
shell=False,
).decode("utf-8")
return logs.splitlines()
except subprocess.CalledProcessError:
return []
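The status parsing above keys off single lines of `systemctl show` output; a fabricated transcript makes the mapping explicit (byte strings, as in the code):

```python
sample = b"LoadState=loaded\nActiveState=active\nSubState=running\n"

if b"LoadState=not-found" in sample:
    status = "OFF"
elif b"ActiveState=active" in sample:
    status = "ACTIVE"
else:
    status = "UNKNOWN"
assert status == "ACTIVE"
```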

View File

@ -1,12 +1,15 @@
"""Class representing Bitwarden service"""
import base64
import subprocess
from typing import Optional, List
import typing
from selfprivacy_api.utils import get_domain
from selfprivacy_api.utils.systemd import get_service_status
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.gitea.icon import GITEA_ICON
@ -34,15 +37,11 @@ class Gitea(Service):
return base64.b64encode(GITEA_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_url() -> Optional[str]:
def get_url() -> typing.Optional[str]:
"""Return service url."""
domain = get_domain()
return f"https://git.{domain}"
@staticmethod
def get_subdomain() -> Optional[str]:
return "git"
@staticmethod
def is_movable() -> bool:
return True
@ -55,6 +54,11 @@ class Gitea(Service):
def get_backup_description() -> str:
return "Git repositories, database and user data."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
return user_data.get("gitea", {}).get("enable", False)
@staticmethod
def get_status() -> ServiceStatus:
"""
@ -67,6 +71,22 @@ class Gitea(Service):
"""
return get_service_status("gitea.service")
@staticmethod
def enable():
"""Enable Gitea service."""
with WriteUserData() as user_data:
if "gitea" not in user_data:
user_data["gitea"] = {}
user_data["gitea"]["enable"] = True
@staticmethod
def disable():
"""Disable Gitea service."""
with WriteUserData() as user_data:
if "gitea" not in user_data:
user_data["gitea"] = {}
user_data["gitea"]["enable"] = False
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "gitea.service"])
@ -92,5 +112,39 @@ class Gitea(Service):
return ""
@staticmethod
def get_folders() -> List[str]:
def get_folders() -> typing.List[str]:
return ["/var/lib/gitea"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
return [
ServiceDnsRecord(
type="A",
name="git",
content=network_utils.get_ip4(),
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="git",
content=network_utils.get_ip6(),
ttl=3600,
),
]
def move_to_volume(self, volume: BlockDevice) -> Job:
job = Jobs.add(
type_id="services.gitea.move",
name="Move Gitea",
description=f"Moving Gitea data to {volume.name}",
)
move_service(
self,
volume,
job,
FolderMoveNames.default_foldermoves(self),
"gitea",
)
return job

View File

@ -1,35 +1,36 @@
"""Class representing Jitsi Meet service"""
"""Class representing Jitsi service"""
import base64
import subprocess
from typing import Optional, List
import typing
from selfprivacy_api.jobs import Job
from selfprivacy_api.utils.systemd import (
from selfprivacy_api.services.generic_status_getter import (
get_service_status_from_several_units,
)
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.utils import get_domain
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.services.jitsimeet.icon import JITSI_ICON
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.jitsi.icon import JITSI_ICON
class JitsiMeet(Service):
class Jitsi(Service):
"""Class representing Jitsi service"""
@staticmethod
def get_id() -> str:
"""Return service id."""
return "jitsi-meet"
return "jitsi"
@staticmethod
def get_display_name() -> str:
"""Return service display name."""
return "JitsiMeet"
return "Jitsi"
@staticmethod
def get_description() -> str:
"""Return service description."""
return "Jitsi Meet is a free and open-source video conferencing solution."
return "Jitsi is a free and open-source video conferencing solution."
@staticmethod
def get_svg_icon() -> str:
@ -37,15 +38,11 @@ class JitsiMeet(Service):
return base64.b64encode(JITSI_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_url() -> Optional[str]:
def get_url() -> typing.Optional[str]:
"""Return service url."""
domain = get_domain()
return f"https://meet.{domain}"
@staticmethod
def get_subdomain() -> Optional[str]:
return "meet"
@staticmethod
def is_movable() -> bool:
return False
@ -58,12 +55,33 @@ class JitsiMeet(Service):
def get_backup_description() -> str:
return "Secrets that are used to encrypt the communication."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
return user_data.get("jitsi", {}).get("enable", False)
@staticmethod
def get_status() -> ServiceStatus:
return get_service_status_from_several_units(
["jitsi-videobridge.service", "jicofo.service"]
)
@staticmethod
def enable():
"""Enable Jitsi service."""
with WriteUserData() as user_data:
if "jitsi" not in user_data:
user_data["jitsi"] = {}
user_data["jitsi"]["enable"] = True
@staticmethod
def disable():
"""Disable Gitea service."""
with WriteUserData() as user_data:
if "jitsi" not in user_data:
user_data["jitsi"] = {}
user_data["jitsi"]["enable"] = False
@staticmethod
def stop():
subprocess.run(
@ -101,8 +119,27 @@ class JitsiMeet(Service):
return ""
@staticmethod
def get_folders() -> List[str]:
def get_folders() -> typing.List[str]:
return ["/var/lib/jitsi-meet"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
ip4 = network_utils.get_ip4()
ip6 = network_utils.get_ip6()
return [
ServiceDnsRecord(
type="A",
name="meet",
content=ip4,
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="meet",
content=ip6,
ttl=3600,
),
]
def move_to_volume(self, volume: BlockDevice) -> Job:
raise NotImplementedError("jitsi-meet service is not movable")
raise NotImplementedError("jitsi service is not movable")

View File

@ -2,13 +2,17 @@
import base64
import subprocess
from typing import Optional, List
import typing
from selfprivacy_api.utils.systemd import (
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_status_getter import (
get_service_status_from_several_units,
)
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api import utils
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.mailserver.icon import MAILSERVER_ICON
@ -17,7 +21,7 @@ class MailServer(Service):
@staticmethod
def get_id() -> str:
return "simple-nixos-mailserver"
return "email"
@staticmethod
def get_display_name() -> str:
@ -36,14 +40,10 @@ class MailServer(Service):
return "virtualMail"
@staticmethod
def get_url() -> Optional[str]:
def get_url() -> typing.Optional[str]:
"""Return service url."""
return None
@staticmethod
def get_subdomain() -> Optional[str]:
return None
@staticmethod
def is_movable() -> bool:
return True
@ -102,64 +102,62 @@ class MailServer(Service):
return ""
@staticmethod
def get_folders() -> List[str]:
def get_folders() -> typing.List[str]:
return ["/var/vmail", "/var/sieve"]
@classmethod
def get_dns_records(cls, ip4: str, ip6: Optional[str]) -> List[ServiceDnsRecord]:
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
domain = utils.get_domain()
dkim_record = utils.get_dkim_key(domain)
ip4 = network_utils.get_ip4()
ip6 = network_utils.get_ip6()
if dkim_record is None:
return []
dns_records = [
return [
ServiceDnsRecord(
type="A",
name=domain,
content=ip4,
ttl=3600,
display_name="Root Domain",
),
ServiceDnsRecord(
type="MX",
type="AAAA",
name=domain,
content=domain,
content=ip6,
ttl=3600,
priority=10,
display_name="Mail server record",
),
ServiceDnsRecord(
type="TXT",
name="_dmarc",
content="v=DMARC1; p=none",
ttl=18000,
display_name="DMARC record",
type="MX", name=domain, content=domain, ttl=3600, priority=10
),
ServiceDnsRecord(
type="TXT", name="_dmarc", content="v=DMARC1; p=none", ttl=18000
),
ServiceDnsRecord(
type="TXT",
name=domain,
content=f"v=spf1 a mx ip4:{ip4} -all",
ttl=18000,
display_name="SPF record",
),
ServiceDnsRecord(
type="TXT",
name="selector._domainkey",
content=dkim_record,
ttl=18000,
display_name="DKIM key",
type="TXT", name="selector._domainkey", content=dkim_record, ttl=18000
),
]
if ip6 is not None:
dns_records.append(
ServiceDnsRecord(
type="AAAA",
name=domain,
content=ip6,
ttl=3600,
display_name="Root Domain (IPv6)",
),
)
return dns_records
def move_to_volume(self, volume: BlockDevice) -> Job:
job = Jobs.add(
type_id="services.email.move",
name="Move Mail Server",
description=f"Moving mailserver data to {volume.name}",
)
move_service(
self,
volume,
job,
FolderMoveNames.default_foldermoves(self),
"email",
)
return job

View File

@ -1,72 +0,0 @@
"""Generic handler for moving services"""
from __future__ import annotations
import shutil
from typing import List
from selfprivacy_api.jobs import Job, report_progress
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.services.owned_path import Bind
class MoveError(Exception):
"""Move of the data has failed"""
def check_volume(volume: BlockDevice, space_needed: int) -> None:
# Check if there is enough space on the new volume
if int(volume.fsavail) < space_needed:
raise MoveError("Not enough space on the new volume.")
# Make sure the volume is mounted
if not volume.is_root() and f"/volumes/{volume.name}" not in volume.mountpoints:
raise MoveError("Volume is not mounted.")
def check_binds(volume_name: str, binds: List[Bind]) -> None:
# Make sure the current directory actually exists and that its user and group are correct
for bind in binds:
bind.validate()
def unbind_folders(owned_folders: List[Bind]) -> None:
for folder in owned_folders:
folder.unbind()
# May be moved into Bind
def move_data_to_volume(
binds: List[Bind],
new_volume: BlockDevice,
job: Job,
) -> List[Bind]:
current_progress = job.progress
if current_progress is None:
current_progress = 0
progress_per_folder = 50 // len(binds)
for bind in binds:
old_location = bind.location_at_volume()
bind.drive = new_volume
new_location = bind.location_at_volume()
try:
shutil.move(old_location, new_location)
except Exception as error:
raise MoveError(
f"could not move {old_location} to {new_location} : {str(error)}"
) from error
progress = current_progress + progress_per_folder
report_progress(progress, job, "Moving data to new volume...")
return binds
def ensure_folder_ownership(folders: List[Bind]) -> None:
for folder in folders:
folder.ensure_ownership()
def bind_folders(folders: List[Bind]):
for folder in folders:
folder.bind()

View File

@ -1,14 +1,14 @@
"""Class representing Nextcloud service."""
import base64
import subprocess
from typing import Optional, List
from selfprivacy_api.utils import get_domain
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.utils.systemd import get_service_status
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.nextcloud.icon import NEXTCLOUD_ICON
@ -36,15 +36,11 @@ class Nextcloud(Service):
return base64.b64encode(NEXTCLOUD_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_url() -> Optional[str]:
def get_url() -> typing.Optional[str]:
"""Return service url."""
domain = get_domain()
return f"https://cloud.{domain}"
@staticmethod
def get_subdomain() -> Optional[str]:
return "cloud"
@staticmethod
def is_movable() -> bool:
return True
@ -57,6 +53,11 @@ class Nextcloud(Service):
def get_backup_description() -> str:
return "All the files and other data stored in Nextcloud."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
return user_data.get("nextcloud", {}).get("enable", False)
@staticmethod
def get_status() -> ServiceStatus:
"""
@ -70,6 +71,22 @@ class Nextcloud(Service):
"""
return get_service_status("phpfpm-nextcloud.service")
@staticmethod
def enable():
"""Enable Nextcloud service."""
with WriteUserData() as user_data:
if "nextcloud" not in user_data:
user_data["nextcloud"] = {}
user_data["nextcloud"]["enable"] = True
@staticmethod
def disable():
"""Disable Nextcloud service."""
with WriteUserData() as user_data:
if "nextcloud" not in user_data:
user_data["nextcloud"] = {}
user_data["nextcloud"]["enable"] = False
@staticmethod
def stop():
"""Stop Nextcloud service."""
@ -100,5 +117,37 @@ class Nextcloud(Service):
return ""
@staticmethod
def get_folders() -> List[str]:
def get_folders() -> typing.List[str]:
return ["/var/lib/nextcloud"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
return [
ServiceDnsRecord(
type="A",
name="cloud",
content=network_utils.get_ip4(),
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="cloud",
content=network_utils.get_ip6(),
ttl=3600,
),
]
def move_to_volume(self, volume: BlockDevice) -> Job:
job = Jobs.add(
type_id="services.nextcloud.move",
name="Move Nextcloud",
description=f"Moving Nextcloud to volume {volume.name}",
)
move_service(
self,
volume,
job,
FolderMoveNames.default_foldermoves(self),
"nextcloud",
)
return job

View File

@ -3,10 +3,12 @@ import base64
import subprocess
import typing
from selfprivacy_api.jobs import Job
from selfprivacy_api.utils.systemd import get_service_status
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.services.ocserv.icon import OCSERV_ICON
import selfprivacy_api.utils.network as network_utils
class Ocserv(Service):
@ -33,10 +35,6 @@ class Ocserv(Service):
"""Return service url."""
return None
@staticmethod
def get_subdomain() -> typing.Optional[str]:
return "vpn"
@staticmethod
def is_movable() -> bool:
return False
@ -53,10 +51,29 @@ class Ocserv(Service):
def get_backup_description() -> str:
return "Nothing to backup."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
return user_data.get("ocserv", {}).get("enable", False)
@staticmethod
def get_status() -> ServiceStatus:
return get_service_status("ocserv.service")
@staticmethod
def enable():
with WriteUserData() as user_data:
if "ocserv" not in user_data:
user_data["ocserv"] = {}
user_data["ocserv"]["enable"] = True
@staticmethod
def disable():
with WriteUserData() as user_data:
if "ocserv" not in user_data:
user_data["ocserv"] = {}
user_data["ocserv"]["enable"] = False
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "ocserv.service"], check=False)
@ -81,6 +98,23 @@ class Ocserv(Service):
def get_logs():
return ""
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
return [
ServiceDnsRecord(
type="A",
name="vpn",
content=network_utils.get_ip4(),
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="vpn",
content=network_utils.get_ip6(),
ttl=3600,
),
]
@staticmethod
def get_folders() -> typing.List[str]:
return []

View File

@ -1,126 +1,7 @@
from __future__ import annotations
import subprocess
import pathlib
from pydantic import BaseModel
from os.path import exists
from selfprivacy_api.utils.block_devices import BlockDevice, BlockDevices
# tests override it to a tmpdir
VOLUMES_PATH = "/volumes"
class BindError(Exception):
pass
class OwnedPath(BaseModel):
"""
A convenient interface for explicitly defining ownership of service folders.
One overrides Service.get_owned_paths() for this.
Why does this exist?
One could use Bind to define ownership, but then one would need to handle the drive,
which is unnecessary and produces code duplication.
It is also somewhat semantically wrong to include OwnedPath into Bind
instead of user and group, because owner and group in Bind are applied to
the original folder on the drive, not to the binding path. Maybe it is still
ok since they are technically both owned; this is undecided yet.
"""
path: str
owner: str
group: str
class Bind:
"""
A directory that resides on some volume but we mount it into fs where we need it.
Used for storing service data.
"""
def __init__(self, binding_path: str, owner: str, group: str, drive: BlockDevice):
self.binding_path = binding_path
self.owner = owner
self.group = group
self.drive = drive
# TODO: delete owned path interface from Service
@staticmethod
def from_owned_path(path: OwnedPath, drive_name: str) -> Bind:
drive = BlockDevices().get_block_device(drive_name)
if drive is None:
raise BindError(f"No such drive: {drive_name}")
return Bind(
binding_path=path.path, owner=path.owner, group=path.group, drive=drive
)
def bind_foldername(self) -> str:
return self.binding_path.split("/")[-1]
def location_at_volume(self) -> str:
return f"{VOLUMES_PATH}/{self.drive.name}/{self.bind_foldername()}"
def validate(self) -> None:
path = pathlib.Path(self.location_at_volume())
if not path.exists():
raise BindError(f"directory {path} is not found.")
if not path.is_dir():
raise BindError(f"{path} is not a directory.")
if path.owner() != self.owner:
raise BindError(f"{path} is not owned by {self.owner}.")
def bind(self) -> None:
if not exists(self.binding_path):
raise BindError(f"cannot bind to a non-existing path: {self.binding_path}")
source = self.location_at_volume()
target = self.binding_path
try:
subprocess.run(
["mount", "--bind", source, target],
stderr=subprocess.PIPE,
check=True,
)
except subprocess.CalledProcessError as error:
print(error.stderr)
raise BindError(f"Unable to bind {source} to {target} :{error.stderr}")
def unbind(self) -> None:
if not exists(self.binding_path):
raise BindError(f"cannot unbind a non-existing path: {self.binding_path}")
try:
subprocess.run(
# umount -l ?
["umount", self.binding_path],
check=True,
)
except subprocess.CalledProcessError:
raise BindError(f"Unable to unmount folder {self.binding_path}.")
pass
def ensure_ownership(self) -> None:
true_location = self.location_at_volume()
try:
subprocess.run(
[
"chown",
"-R",
f"{self.owner}:{self.group}",
# Could we just chown the bound location instead?
true_location,
],
check=True,
stderr=subprocess.PIPE,
)
except subprocess.CalledProcessError as error:
print(error.stderr)
error_message = (
f"Unable to set ownership of {true_location} :{error.stderr}"
)
raise BindError(error_message)
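
A minimal sketch of how `OwnedPath` and `Bind` fit together; the drive name `sdb` and the Nextcloud folder are placeholders, not taken from a real deployment.

```python
# Placeholder values: "sdb" and the Nextcloud folder are examples only.
from selfprivacy_api.services.owned_path import OwnedPath, Bind

owned = OwnedPath(path="/var/lib/nextcloud", owner="nextcloud", group="nextcloud")
bind = Bind.from_owned_path(owned, drive_name="sdb")

bind.validate()          # /volumes/sdb/nextcloud must exist, be a dir, owned by "nextcloud"
bind.ensure_ownership()  # chown -R nextcloud:nextcloud on the copy living on the volume
bind.bind()              # mount --bind /volumes/sdb/nextcloud /var/lib/nextcloud
```

During an actual move the folders are first unbound with `unbind()`, the data is moved to the new volume, and only then re-bound, as the moving helpers above do.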

View File

@ -1,14 +1,15 @@
"""Class representing Nextcloud service."""
import base64
import subprocess
from typing import Optional, List
from selfprivacy_api.utils import get_domain
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api.utils.systemd import get_service_status
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.pleroma.icon import PLEROMA_ICON
@ -32,15 +33,11 @@ class Pleroma(Service):
return base64.b64encode(PLEROMA_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_url() -> Optional[str]:
def get_url() -> typing.Optional[str]:
"""Return service url."""
domain = get_domain()
return f"https://social.{domain}"
@staticmethod
def get_subdomain() -> Optional[str]:
return "social"
@staticmethod
def is_movable() -> bool:
return True
@ -53,10 +50,29 @@ class Pleroma(Service):
def get_backup_description() -> str:
return "Your Pleroma accounts, posts and media."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
return user_data.get("pleroma", {}).get("enable", False)
@staticmethod
def get_status() -> ServiceStatus:
return get_service_status("pleroma.service")
@staticmethod
def enable():
with WriteUserData() as user_data:
if "pleroma" not in user_data:
user_data["pleroma"] = {}
user_data["pleroma"]["enable"] = True
@staticmethod
def disable():
with WriteUserData() as user_data:
if "pleroma" not in user_data:
user_data["pleroma"] = {}
user_data["pleroma"]["enable"] = False
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "pleroma.service"])
@ -85,10 +101,10 @@ class Pleroma(Service):
return ""
@staticmethod
def get_owned_folders() -> List[OwnedPath]:
def get_owned_folders() -> typing.List[OwnedPath]:
"""
Get a list of occupied directories with ownership info
Pleroma has folders that are owned by different users
pleroma has folders that are owned by different users
"""
return [
OwnedPath(
@ -102,3 +118,35 @@ class Pleroma(Service):
group="postgres",
),
]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
return [
ServiceDnsRecord(
type="A",
name="social",
content=network_utils.get_ip4(),
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="social",
content=network_utils.get_ip6(),
ttl=3600,
),
]
def move_to_volume(self, volume: BlockDevice) -> Job:
job = Jobs.add(
type_id="services.pleroma.move",
name="Move Pleroma",
description=f"Moving Pleroma to volume {volume.name}",
)
move_service(
self,
volume,
job,
FolderMoveNames.default_foldermoves(self),
"pleroma",
)
return job

View File

@ -1,32 +1,41 @@
"""Abstract class for a service running on a server"""
from abc import ABC, abstractmethod
from typing import List, Optional
from enum import Enum
import typing
from pydantic import BaseModel
from selfprivacy_api.jobs import Job
from selfprivacy_api import utils
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.utils.waitloop import wait_until_true
from selfprivacy_api.utils.block_devices import BlockDevice, BlockDevices
from selfprivacy_api.jobs import Job, Jobs, JobStatus, report_progress
from selfprivacy_api.jobs.upgrade_system import rebuild_system
from selfprivacy_api.models.services import ServiceStatus, ServiceDnsRecord
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.owned_path import OwnedPath, Bind
from selfprivacy_api.services.moving import (
check_binds,
check_volume,
unbind_folders,
bind_folders,
ensure_folder_ownership,
MoveError,
move_data_to_volume,
)
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api import utils
from selfprivacy_api.utils.waitloop import wait_until_true
DEFAULT_START_STOP_TIMEOUT = 5 * 60
class ServiceStatus(Enum):
"""Enum for service status"""
ACTIVE = "ACTIVE"
RELOADING = "RELOADING"
INACTIVE = "INACTIVE"
FAILED = "FAILED"
ACTIVATING = "ACTIVATING"
DEACTIVATING = "DEACTIVATING"
OFF = "OFF"
class ServiceDnsRecord(BaseModel):
type: str
name: str
content: str
ttl: int
priority: typing.Optional[int] = None
class Service(ABC):
"""
Service here is some software that is hosted on the server and
@ -67,22 +76,14 @@ class Service(ABC):
@staticmethod
@abstractmethod
def get_url() -> Optional[str]:
def get_url() -> typing.Optional[str]:
"""
The url of the service if it is accessible from the internet browser.
"""
pass
@staticmethod
@abstractmethod
def get_subdomain() -> Optional[str]:
"""
The assigned primary subdomain for this service.
"""
pass
@classmethod
def get_user(cls) -> Optional[str]:
def get_user(cls) -> typing.Optional[str]:
"""
The user that owns the service's files.
Defaults to the service's id.
@ -90,7 +91,7 @@ class Service(ABC):
return cls.get_id()
@classmethod
def get_group(cls) -> Optional[str]:
def get_group(cls) -> typing.Optional[str]:
"""
The group that owns the service's files.
Defaults to the service's user.
@ -123,17 +124,11 @@ class Service(ABC):
"""
pass
@classmethod
def is_enabled(cls) -> bool:
"""
`True` if the service is enabled.
`False` if it is not enabled or not defined in the file.
If there is nothing in the file, this is equivalent to False,
because NixOS won't enable it then.
"""
name = cls.get_id()
with ReadUserData() as user_data:
return user_data.get("modules", {}).get(name, {}).get("enable", False)
@staticmethod
@abstractmethod
def is_enabled() -> bool:
"""`True` if the service is enabled."""
pass
@staticmethod
@abstractmethod
@ -141,25 +136,17 @@ class Service(ABC):
"""The status of the service, reported by systemd."""
pass
@classmethod
def _set_enable(cls, enable: bool):
name = cls.get_id()
with WriteUserData() as user_data:
if "modules" not in user_data:
user_data["modules"] = {}
if name not in user_data["modules"]:
user_data["modules"][name] = {}
user_data["modules"][name]["enable"] = enable
@classmethod
def enable(cls):
@staticmethod
@abstractmethod
def enable():
"""Enable the service. Usually this means enabling systemd unit."""
cls._set_enable(True)
pass
@classmethod
def disable(cls):
@staticmethod
@abstractmethod
def disable():
"""Disable the service. Usually this means disabling systemd unit."""
cls._set_enable(False)
pass
@staticmethod
@abstractmethod
@ -206,32 +193,10 @@ class Service(ABC):
storage_used += get_storage_usage(folder)
return storage_used
@classmethod
def get_dns_records(cls, ip4: str, ip6: Optional[str]) -> List[ServiceDnsRecord]:
subdomain = cls.get_subdomain()
display_name = cls.get_display_name()
if subdomain is None:
return []
dns_records = [
ServiceDnsRecord(
type="A",
name=subdomain,
content=ip4,
ttl=3600,
display_name=display_name,
)
]
if ip6 is not None:
dns_records.append(
ServiceDnsRecord(
type="AAAA",
name=subdomain,
content=ip6,
ttl=3600,
display_name=f"{display_name} (IPv6)",
)
)
return dns_records
@staticmethod
@abstractmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
pass
@classmethod
def get_drive(cls) -> str:
@ -244,19 +209,15 @@ class Service(ABC):
return root_device
with utils.ReadUserData() as userdata:
if userdata.get("useBinds", False):
return (
userdata.get("modules", {})
.get(cls.get_id(), {})
.get(
"location",
root_device,
)
return userdata.get(cls.get_id(), {}).get(
"location",
root_device,
)
else:
return root_device
@classmethod
def get_folders(cls) -> List[str]:
def get_folders(cls) -> typing.List[str]:
"""
get a plain list of occupied directories
Default extracts info from overridden get_owned_folders()
@ -268,7 +229,7 @@ class Service(ABC):
return [owned_folder.path for owned_folder in cls.get_owned_folders()]
@classmethod
def get_owned_folders(cls) -> List[OwnedPath]:
def get_owned_folders(cls) -> typing.List[OwnedPath]:
"""
Get a list of occupied directories with ownership info
Default extracts info from overriden get_folders()
@ -283,137 +244,17 @@ class Service(ABC):
def get_foldername(path: str) -> str:
return path.split("/")[-1]
# TODO: with better json utils, it can be one line, and not a separate function
@classmethod
def set_location(cls, volume: BlockDevice):
"""
Only changes userdata
"""
service_id = cls.get_id()
with WriteUserData() as user_data:
if "modules" not in user_data:
user_data["modules"] = {}
if service_id not in user_data["modules"]:
user_data["modules"][service_id] = {}
user_data["modules"][service_id]["location"] = volume.name
def binds(self) -> List[Bind]:
owned_folders = self.get_owned_folders()
return [
Bind.from_owned_path(folder, self.get_drive()) for folder in owned_folders
]
def assert_can_move(self, new_volume):
"""
Checks if the service can be moved to new volume
Raises errors if it cannot
"""
service_name = self.get_display_name()
if not self.is_movable():
raise MoveError(f"{service_name} is not movable")
with ReadUserData() as user_data:
if not user_data.get("useBinds", False):
raise MoveError("Server is not using binds.")
current_volume_name = self.get_drive()
if current_volume_name == new_volume.name:
raise MoveError(f"{service_name} is already on volume {new_volume}")
check_volume(new_volume, space_needed=self.get_storage_usage())
binds = self.binds()
if binds == []:
raise MoveError("nothing to move")
check_binds(current_volume_name, binds)
def do_move_to_volume(
self,
new_volume: BlockDevice,
job: Job,
):
"""
Move a service to another volume.
Note: It may be much simpler to write it per bind, but a bit less safe?
"""
service_name = self.get_display_name()
binds = self.binds()
report_progress(10, job, "Unmounting folders from old volume...")
unbind_folders(binds)
report_progress(20, job, "Moving data to new volume...")
binds = move_data_to_volume(binds, new_volume, job)
report_progress(70, job, f"Making sure {service_name} owns its files...")
try:
ensure_folder_ownership(binds)
except Exception as error:
# We have logged it via print and we additionally log it here in the error field
# We are continuing anyway but Job has no warning field
Jobs.update(
job,
JobStatus.RUNNING,
error=f"Service {service_name} will not be able to write files: "
+ str(error),
)
report_progress(90, job, f"Mounting {service_name} data...")
bind_folders(binds)
report_progress(95, job, f"Finishing moving {service_name}...")
self.set_location(new_volume)
def move_to_volume(self, volume: BlockDevice, job: Job) -> Job:
service_name = self.get_display_name()
report_progress(0, job, "Performing pre-move checks...")
self.assert_can_move(volume)
report_progress(5, job, f"Stopping {service_name}...")
assert self is not None
with StoppedService(self):
report_progress(9, job, "Stopped service, starting the move...")
self.do_move_to_volume(volume, job)
report_progress(98, job, "Move complete, rebuilding...")
rebuild_system(job, upgrade=False)
Jobs.update(
job=job,
status=JobStatus.FINISHED,
result=f"{service_name} moved successfully.",
status_text=f"Starting {service_name}...",
progress=100,
)
return job
@abstractmethod
def move_to_volume(self, volume: BlockDevice) -> Job:
pass
@classmethod
def owned_path(cls, path: str):
"""Default folder ownership"""
service_name = cls.get_display_name()
try:
owner = cls.get_user()
if owner is None:
# TODO: assume root?
# (if we do not want to do assumptions, maybe not declare user optional?)
raise LookupError(f"no user for service: {service_name}")
group = cls.get_group()
if group is None:
raise LookupError(f"no group for service: {service_name}")
except Exception as error:
raise LookupError(
f"when deciding a bind for folder {path} of service {service_name}, error: {str(error)}"
)
"""A default guess on folder ownership"""
return OwnedPath(
path=path,
owner=owner,
group=group,
owner=cls.get_user(),
group=cls.get_group(),
)
def pre_backup(self):

View File

@ -1,22 +0,0 @@
from selfprivacy_api.services import Service
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.jobs import Job, Jobs, JobStatus
@huey.task()
def move_service(service: Service, new_volume: BlockDevice, job: Job) -> bool:
"""
Move service's folders to new physical volume
Does not raise exceptions (we cannot handle exceptions from tasks).
Reports all errors via job.
"""
try:
service.move_to_volume(new_volume, job)
except Exception as e:
Jobs.update(
job=job,
status=JobStatus.ERROR,
error=type(e).__name__ + " " + str(e),
)
return True
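
A sketch of how this task is typically wired up from the calling side; the `start_move` helper and the job `type_id` string are illustrative, while `move_service`, `Jobs`, `get_service_by_id`, and `BlockDevices` come from the modules shown in this diff.

```python
# Illustrative caller-side sketch; start_move and the type_id string are made up.
from selfprivacy_api.jobs import Jobs
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.services.tasks import move_service
from selfprivacy_api.utils.block_devices import BlockDevices


def start_move(service_id: str, volume_name: str):
    service = get_service_by_id(service_id)
    volume = BlockDevices().get_block_device(volume_name)
    job = Jobs.add(
        type_id=f"services.{service_id}.move",
        name=f"Move {service_id}",
        description=f"Moving {service_id} data to {volume_name}",
    )
    # Enqueue the huey task; errors are reported through the job instead of raised.
    move_service(service, volume, job)
    return job
```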

View File

@ -9,8 +9,9 @@ from os import path
# from enum import Enum
from selfprivacy_api.jobs import Job
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.test_service.icon import BITWARDEN_ICON
@ -21,19 +22,16 @@ class DummyService(Service):
"""A test service"""
folders: List[str] = []
startstop_delay = 0.0
startstop_delay = 0
backuppable = True
movable = True
# if False, we try to actually move
simulate_moving = True
drive = "sda1"
def __init_subclass__(cls, folders: List[str]):
cls.folders = folders
def __init__(self):
super().__init__()
with open(self.status_file(), "w") as file:
status_file = self.status_file()
with open(status_file, "w") as file:
file.write(ServiceStatus.ACTIVE.value)
@staticmethod
@ -64,12 +62,8 @@ class DummyService(Service):
return f"https://password.{domain}"
@staticmethod
def get_subdomain() -> typing.Optional[str]:
return "password"
@classmethod
def is_movable(cls) -> bool:
return cls.movable
def is_movable() -> bool:
return True
@staticmethod
def is_required() -> bool:
@ -79,6 +73,10 @@ class DummyService(Service):
def get_backup_description() -> str:
return "How did we get here?"
@staticmethod
def is_enabled() -> bool:
return True
@classmethod
def status_file(cls) -> str:
dir = cls.folders[0]
@ -88,7 +86,7 @@ class DummyService(Service):
@classmethod
def set_status(cls, status: ServiceStatus):
with open(cls.status_file(), "w") as file:
file.write(status.value)
status_string = file.write(status.value)
@classmethod
def get_status(cls) -> ServiceStatus:
@ -101,17 +99,16 @@ class DummyService(Service):
cls, new_status: ServiceStatus, delay_sec: float
):
"""simulating a delay on systemd side"""
if delay_sec == 0:
cls.set_status(new_status)
return
status_file = cls.status_file()
command = [
"bash",
"-c",
f" sleep {delay_sec} && echo {new_status.value} > {status_file}",
]
subprocess.Popen(command)
handle = subprocess.Popen(command)
if delay_sec == 0:
handle.communicate()
@classmethod
def set_backuppable(cls, new_value: bool) -> None:
@ -119,30 +116,22 @@ class DummyService(Service):
we can only set it up dynamically for tests via a classmethod"""
cls.backuppable = new_value
@classmethod
def set_movable(cls, new_value: bool) -> None:
"""For tests: because is_movale is static,
we can only set it up dynamically for tests via a classmethod"""
cls.movable = new_value
@classmethod
def can_be_backed_up(cls) -> bool:
"""`True` if the service can be backed up."""
return cls.backuppable
@classmethod
def set_delay(cls, new_delay_sec: float) -> None:
cls.startstop_delay = new_delay_sec
def enable(cls):
pass
@classmethod
def set_drive(cls, new_drive: str) -> None:
cls.drive = new_drive
def disable(cls, delay):
pass
@classmethod
def set_simulated_moves(cls, enabled: bool) -> None:
"""If True, this service will not actually call moving code
when moved"""
cls.simulate_moving = enabled
def set_delay(cls, new_delay):
cls.startstop_delay = new_delay
@classmethod
def stop(cls):
@ -180,17 +169,31 @@ class DummyService(Service):
storage_usage = 0
return storage_usage
@classmethod
def get_drive(cls) -> str:
return cls.drive
@staticmethod
def get_drive() -> str:
return "sda1"
@classmethod
def get_folders(cls) -> List[str]:
return cls.folders
def do_move_to_volume(self, volume: BlockDevice, job: Job) -> Job:
if self.simulate_moving is False:
return super(DummyService, self).do_move_to_volume(volume, job)
else:
self.set_drive(volume.name)
return job
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
"""Return list of DNS records for Bitwarden service."""
return [
ServiceDnsRecord(
type="A",
name="password",
content=network_utils.get_ip4(),
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="password",
content=network_utils.get_ip6(),
ttl=3600,
),
]
def move_to_volume(self, volume: BlockDevice) -> Job:
pass

View File

@ -1,14 +1,4 @@
from os import environ
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.backup.tasks import *
from selfprivacy_api.services.tasks import move_service
from selfprivacy_api.jobs.upgrade_system import rebuild_system_task
from selfprivacy_api.jobs.test import test_job
from selfprivacy_api.jobs.nix_collect_garbage import calculate_and_clear_dead_paths
if environ.get("TEST_MODE"):
from tests.test_huey import sum
from selfprivacy_api.backup.tasks import *
from selfprivacy_api.services.generic_service_mover import move_service

View File

@ -6,25 +6,27 @@ import json
import os
import subprocess
import portalocker
import typing
USERDATA_FILE = "/etc/nixos/userdata.json"
SECRETS_FILE = "/etc/selfprivacy/secrets.json"
DKIM_DIR = "/var/dkim/"
USERDATA_FILE = "/etc/nixos/userdata/userdata.json"
TOKENS_FILE = "/etc/nixos/userdata/tokens.json"
JOBS_FILE = "/etc/nixos/userdata/jobs.json"
DOMAIN_FILE = "/var/domain"
class UserDataFiles(Enum):
"""Enum for userdata files"""
USERDATA = 0
SECRETS = 3
TOKENS = 1
JOBS = 2
def get_domain():
"""Get domain from userdata.json"""
with ReadUserData() as user_data:
return user_data["domain"]
"""Get domain from /var/domain without trailing new line"""
with open(DOMAIN_FILE, "r", encoding="utf-8") as domain_file:
domain = domain_file.readline().rstrip()
return domain
class WriteUserData(object):
@ -33,12 +35,14 @@ class WriteUserData(object):
def __init__(self, file_type=UserDataFiles.USERDATA):
if file_type == UserDataFiles.USERDATA:
self.userdata_file = open(USERDATA_FILE, "r+", encoding="utf-8")
elif file_type == UserDataFiles.SECRETS:
elif file_type == UserDataFiles.TOKENS:
self.userdata_file = open(TOKENS_FILE, "r+", encoding="utf-8")
elif file_type == UserDataFiles.JOBS:
# Make sure file exists
if not os.path.exists(SECRETS_FILE):
with open(SECRETS_FILE, "w", encoding="utf-8") as secrets_file:
secrets_file.write("{}")
self.userdata_file = open(SECRETS_FILE, "r+", encoding="utf-8")
if not os.path.exists(JOBS_FILE):
with open(JOBS_FILE, "w", encoding="utf-8") as jobs_file:
jobs_file.write("{}")
self.userdata_file = open(JOBS_FILE, "r+", encoding="utf-8")
else:
raise ValueError("Unknown file type")
portalocker.lock(self.userdata_file, portalocker.LOCK_EX)
@ -62,11 +66,14 @@ class ReadUserData(object):
def __init__(self, file_type=UserDataFiles.USERDATA):
if file_type == UserDataFiles.USERDATA:
self.userdata_file = open(USERDATA_FILE, "r", encoding="utf-8")
elif file_type == UserDataFiles.SECRETS:
if not os.path.exists(SECRETS_FILE):
with open(SECRETS_FILE, "w", encoding="utf-8") as secrets_file:
secrets_file.write("{}")
self.userdata_file = open(SECRETS_FILE, "r", encoding="utf-8")
elif file_type == UserDataFiles.TOKENS:
self.userdata_file = open(TOKENS_FILE, "r", encoding="utf-8")
elif file_type == UserDataFiles.JOBS:
# Make sure file exists
if not os.path.exists(JOBS_FILE):
with open(JOBS_FILE, "w", encoding="utf-8") as jobs_file:
jobs_file.write("{}")
self.userdata_file = open(JOBS_FILE, "r", encoding="utf-8")
else:
raise ValueError("Unknown file type")
portalocker.lock(self.userdata_file, portalocker.LOCK_SH)
@ -159,31 +166,26 @@ def parse_date(date_str: str) -> datetime.datetime:
raise ValueError("Invalid date string")
def parse_dkim(dkim: str) -> str:
# extract key from file
dkim = dkim.split("(")[1]
dkim = dkim.split(")")[0]
# replace all quotes with nothing
dkim = dkim.replace('"', "")
# trim whitespace, remove newlines and tabs
dkim = dkim.strip()
dkim = dkim.replace("\n", "")
dkim = dkim.replace("\t", "")
# remove all redundant spaces
dkim = " ".join(dkim.split())
return dkim
def get_dkim_key(domain: str, parse: bool = True) -> typing.Optional[str]:
def get_dkim_key(domain, parse=True):
"""Get DKIM key from /var/dkim/<domain>.selector.txt"""
dkim_path = os.path.join(DKIM_DIR, domain + ".selector.txt")
if os.path.exists(dkim_path):
with open(dkim_path, encoding="utf-8") as dkim_file:
dkim = dkim_file.read()
if parse:
dkim = parse_dkim(dkim)
return dkim
if os.path.exists("/var/dkim/" + domain + ".selector.txt"):
cat_process = subprocess.Popen(
["cat", "/var/dkim/" + domain + ".selector.txt"], stdout=subprocess.PIPE
)
dkim = cat_process.communicate()[0]
if parse:
# Extract key from file
dkim = dkim.split(b"(")[1]
dkim = dkim.split(b")")[0]
# Replace all quotes with nothing
dkim = dkim.replace(b'"', b"")
# Trim whitespace, remove newlines and tabs
dkim = dkim.strip()
dkim = dkim.replace(b"\n", b"")
dkim = dkim.replace(b"\t", b"")
# Remove all redundant spaces
dkim = b" ".join(dkim.split())
return str(dkim, "utf-8")
return None
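
For context, `parse_dkim` collapses a BIND-style TXT record into a single line. A shortened, made-up record and its parsed form (the key material is fake):

```python
# The record below is fake and shortened; it only illustrates the input format.
from selfprivacy_api.utils import parse_dkim

raw = '''selector._domainkey IN TXT ( "v=DKIM1; k=rsa; "
    "p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC7" )  ; ----- DKIM key'''

print(parse_dkim(raw))
# -> v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC7
```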

View File

@ -1,11 +1,8 @@
"""A block device API wrapping lsblk"""
from __future__ import annotations
"""Wrapper for block device functions."""
import subprocess
import json
import typing
from pydantic import BaseModel
from selfprivacy_api.utils import WriteUserData
from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass
@ -14,7 +11,6 @@ def get_block_device(device_name):
"""
Return a block device by name.
"""
# TODO: remove the function and related tests: duplicated by singleton
lsblk_output = subprocess.check_output(
[
"lsblk",
@ -47,37 +43,22 @@ class BlockDevice:
A block device.
"""
def __init__(self, device_dict: dict):
self.update_from_dict(device_dict)
def update_from_dict(self, device_dict: dict):
self.name = device_dict["name"]
self.path = device_dict["path"]
self.fsavail = str(device_dict["fsavail"])
self.fssize = str(device_dict["fssize"])
self.fstype = device_dict["fstype"]
self.fsused = str(device_dict["fsused"])
self.mountpoints = device_dict["mountpoints"]
self.label = device_dict["label"]
self.uuid = device_dict["uuid"]
self.size = str(device_dict["size"])
self.model = device_dict["model"]
self.serial = device_dict["serial"]
self.type = device_dict["type"]
def __init__(self, block_device):
self.name = block_device["name"]
self.path = block_device["path"]
self.fsavail = str(block_device["fsavail"])
self.fssize = str(block_device["fssize"])
self.fstype = block_device["fstype"]
self.fsused = str(block_device["fsused"])
self.mountpoints = block_device["mountpoints"]
self.label = block_device["label"]
self.uuid = block_device["uuid"]
self.size = str(block_device["size"])
self.model = block_device["model"]
self.serial = block_device["serial"]
self.type = block_device["type"]
self.locked = False
self.children: typing.List[BlockDevice] = []
if "children" in device_dict.keys():
for child in device_dict["children"]:
self.children.append(BlockDevice(child))
def all_children(self) -> typing.List[BlockDevice]:
result = []
for child in self.children:
result.extend(child.all_children())
result.append(child)
return result
def __str__(self):
return self.name
@ -101,7 +82,17 @@ class BlockDevice:
Update current data and return a dictionary of stats.
"""
device = get_block_device(self.name)
self.update_from_dict(device)
self.fsavail = str(device["fsavail"])
self.fssize = str(device["fssize"])
self.fstype = device["fstype"]
self.fsused = str(device["fsused"])
self.mountpoints = device["mountpoints"]
self.label = device["label"]
self.uuid = device["uuid"]
self.size = str(device["size"])
self.model = device["model"]
self.serial = device["serial"]
self.type = device["type"]
return {
"name": self.name,
@ -119,14 +110,6 @@ class BlockDevice:
"type": self.type,
}
def is_usable_partition(self):
# Ignore devices with type "rom"
if self.type == "rom":
return False
if self.fstype == "ext4":
return True
return False
def resize(self):
"""
Resize the block device.
@ -171,9 +154,6 @@ class BlockDevice:
return False
# TODO: SingletonMetaclass messes with tests and is able to persist state
# between them. If you have very weird test crosstalk that's probably why
# I am not sure it NEEDS to be SingletonMetaclass
class BlockDevices(metaclass=SingletonMetaclass):
"""Singleton holding all Block devices"""
@ -185,16 +165,41 @@ class BlockDevices(metaclass=SingletonMetaclass):
"""
Update the list of block devices.
"""
devices = BlockDevices.lsblk_devices()
children = []
devices = []
lsblk_output = subprocess.check_output(
[
"lsblk",
"-J",
"-b",
"-o",
"NAME,PATH,FSAVAIL,FSSIZE,FSTYPE,FSUSED,MOUNTPOINTS,LABEL,UUID,SIZE,MODEL,SERIAL,TYPE",
]
)
lsblk_output = lsblk_output.decode("utf-8")
lsblk_output = json.loads(lsblk_output)
for device in lsblk_output["blockdevices"]:
# Ignore devices with type "rom"
if device["type"] == "rom":
continue
# Ignore iso9660 devices
if device["fstype"] == "iso9660":
continue
if device["fstype"] is None:
if "children" in device:
for child in device["children"]:
if child["fstype"] == "ext4":
device = child
break
devices.append(device)
# Add new devices and delete non-existent devices
for device in devices:
children.extend(device.all_children())
devices.extend(children)
valid_devices = [device for device in devices if device.is_usable_partition()]
self.block_devices = valid_devices
if device["name"] not in [
block_device.name for block_device in self.block_devices
]:
self.block_devices.append(BlockDevice(device))
for block_device in self.block_devices:
if block_device.name not in [device["name"] for device in devices]:
self.block_devices.remove(block_device)
def get_block_device(self, name: str) -> typing.Optional[BlockDevice]:
"""
@ -231,25 +236,3 @@ class BlockDevices(metaclass=SingletonMetaclass):
if "/" in block_device.mountpoints:
return block_device
raise RuntimeError("No root block device found")
@staticmethod
def lsblk_device_dicts() -> typing.List[dict]:
lsblk_output_bytes = subprocess.check_output(
[
"lsblk",
"-J",
"-b",
"-o",
"NAME,PATH,FSAVAIL,FSSIZE,FSTYPE,FSUSED,MOUNTPOINTS,LABEL,UUID,SIZE,MODEL,SERIAL,TYPE",
]
)
lsblk_output = lsblk_output_bytes.decode("utf-8")
return json.loads(lsblk_output)["blockdevices"]
@staticmethod
def lsblk_devices() -> typing.List[BlockDevice]:
devices = []
for device in BlockDevices.lsblk_device_dicts():
devices.append(device)
return [BlockDevice(device) for device in devices]
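
For reference, a trimmed and invented example of what `lsblk_device_dicts()` returns after `json.loads()`; real output lists every attached device with byte-precise sizes.

```python
# Invented example data: device names, sizes and UUIDs are made up.
example_blockdevices = [
    {
        "name": "sda",
        "path": "/dev/sda",
        "fsavail": None,
        "fssize": None,
        "fstype": None,
        "fsused": None,
        "mountpoints": [None],
        "label": None,
        "uuid": None,
        "size": 20000000000,
        "model": "QEMU HARDDISK",
        "serial": "drive-scsi0",
        "type": "disk",
        "children": [
            {
                "name": "sda1",
                "path": "/dev/sda1",
                "fsavail": 11000000000,
                "fssize": 19000000000,
                "fstype": "ext4",
                "fsused": 8000000000,
                "mountpoints": ["/"],
                "label": None,
                "uuid": "f1f1f1f1-0000-0000-0000-f1f1f1f1f1f1",
                "size": 19000000000,
                "model": None,
                "serial": None,
                "type": "part",
            }
        ],
    }
]
```

`BlockDevice` wraps each of these dicts, recursing into `children`, and `is_usable_partition()` then keeps only the ext4 entries.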

View File

@ -1,24 +1,15 @@
"""MiniHuey singleton."""
from os import environ
from huey import RedisHuey
from selfprivacy_api.utils.redis_pool import RedisPool
HUEY_DATABASE_NUMBER = 10
def immediate() -> bool:
if environ.get("HUEY_QUEUES_FOR_TESTS"):
return False
if environ.get("TEST_MODE"):
return True
return False
import os
from huey import SqliteHuey
HUEY_DATABASE = "/etc/nixos/userdata/tasks.db"
# Singleton instance containing the huey database.
huey = RedisHuey(
"selfprivacy-api",
url=RedisPool.connection_url(dbnumber=HUEY_DATABASE_NUMBER),
immediate=immediate(),
test_mode = os.environ.get("TEST_MODE")
huey = SqliteHuey(
HUEY_DATABASE,
immediate=test_mode == "true",
utc=True,
)
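
A minimal sketch of declaring and enqueueing a task against this `huey` instance; the `add` task is a placeholder and not part of the API.

```python
# Placeholder task: "add" does not exist in the codebase, it only shows the pattern.
from selfprivacy_api.utils.huey import huey


@huey.task()
def add(a: int, b: int) -> int:
    return a + b


# Normally this enqueues the call for the huey worker; in immediate mode
# (as enabled for tests) it runs synchronously in-process instead.
result = add(2, 3)
print(result(blocking=True))  # -> 5
```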

View File

@ -2,7 +2,6 @@
"""Network utils"""
import subprocess
import re
import ipaddress
from typing import Optional
@ -18,15 +17,13 @@ def get_ip4() -> str:
return ip4.group(1) if ip4 else ""
def get_ip6() -> Optional[str]:
def get_ip6() -> str:
"""Get IPv6 address"""
try:
ip6_addresses = subprocess.check_output(
["ip", "addr", "show", "dev", "eth0"]
).decode("utf-8")
ip6_addresses = re.findall(r"inet6 (\S+)\/\d+", ip6_addresses)
for address in ip6_addresses:
if ipaddress.IPv6Address(address).is_global:
return address
ip6 = subprocess.check_output(["ip", "addr", "show", "dev", "eth0"]).decode(
"utf-8"
)
ip6 = re.search(r"inet6 (\S+)\/\d+", ip6)
except subprocess.CalledProcessError:
return None
ip6 = None
return ip6.group(1) if ip6 else ""

View File

@ -1,8 +1,8 @@
"""
Redis pool module for selfprivacy_api
"""
from os import environ
import redis
from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass
REDIS_SOCKET = "/run/redis-sp-api/redis.sock"
@ -14,19 +14,19 @@ class RedisPool(metaclass=SingletonMetaclass):
"""
def __init__(self):
self._pool = redis.ConnectionPool.from_url(
RedisPool.connection_url(dbnumber=0),
decode_responses=True,
)
self._pubsub_connection = self.get_connection()
if "USE_REDIS_PORT" in environ:
self._pool = redis.ConnectionPool(
host="127.0.0.1",
port=int(environ["USE_REDIS_PORT"]),
decode_responses=True,
)
@staticmethod
def connection_url(dbnumber: int) -> str:
"""
redis://[[username]:[password]]@localhost:6379/0
unix://[username@]/path/to/socket.sock?db=0[&password=password]
"""
return f"unix://{REDIS_SOCKET}?db={dbnumber}"
else:
self._pool = redis.ConnectionPool.from_url(
f"unix://{REDIS_SOCKET}",
decode_responses=True,
)
self._pubsub_connection = self.get_connection()
def get_connection(self):
"""

2
setup.py Normal file → Executable file
View File

@ -2,7 +2,7 @@ from setuptools import setup, find_packages
setup(
name="selfprivacy_api",
version="3.2.1",
version="2.4.2",
packages=find_packages(),
scripts=[
"selfprivacy_api/app.py",

48
shell.nix Normal file
View File

@ -0,0 +1,48 @@
{ pkgs ? import <nixos-22.11> { } }:
let
sp-python = pkgs.python310.withPackages (p: with p; [
setuptools
portalocker
pytz
pytest
pytest-mock
pytest-datadir
huey
gevent
mnemonic
coverage
pylint
rope
mypy
pylsp-mypy
pydantic
typing-extensions
psutil
black
fastapi
uvicorn
redis
strawberry-graphql
flake8-bugbear
flake8
]);
in
pkgs.mkShell {
buildInputs = [
sp-python
pkgs.black
pkgs.redis
pkgs.restic
pkgs.rclone
];
shellHook = ''
PYTHONPATH=${sp-python}/${sp-python.sitePackages}
# envs set with export and as attributes are treated differently.
# for example. printenv <Name> will not fetch the value of an attribute.
export USE_REDIS_PORT=6379
pkill redis-server
sleep 2
setsid redis-server --bind 127.0.0.1 --port $USE_REDIS_PORT >/dev/null 2>/dev/null &
# maybe set more env-vars
'';
}

View File

@ -1,4 +0,0 @@
#!/usr/bin/bash
# sync the version of nixpkgs used in the repo with one set in nixos-config
nix flake lock --override-input nixpkgs nixpkgs --inputs-from 'git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=flakes'

View File

@ -2,45 +2,43 @@ import json
from datetime import datetime, timezone, timedelta
from mnemonic import Mnemonic
from selfprivacy_api.jobs import Job, JobStatus
# For expiration tests. If this becomes a headache, consider freezegun.
RECOVERY_KEY_VALIDATION_DATETIME = "selfprivacy_api.models.tokens.time.datetime"
DEVICE_KEY_VALIDATION_DATETIME = RECOVERY_KEY_VALIDATION_DATETIME
def ten_hours_into_future_naive():
return datetime.now() + timedelta(hours=10)
def five_minutes_into_future_naive():
return datetime.now() + timedelta(minutes=5)
def ten_hours_into_future_naive_utc():
return datetime.utcnow() + timedelta(hours=10)
def five_minutes_into_future_naive_utc():
return datetime.utcnow() + timedelta(minutes=5)
def ten_hours_into_future():
return datetime.now(timezone.utc) + timedelta(hours=10)
def five_minutes_into_future():
return datetime.now(timezone.utc) + timedelta(minutes=5)
def ten_minutes_into_past_naive():
return datetime.now() - timedelta(minutes=10)
def five_minutes_into_past_naive():
return datetime.now() - timedelta(minutes=5)
def ten_minutes_into_past_naive_utc():
return datetime.utcnow() - timedelta(minutes=10)
def five_minutes_into_past_naive_utc():
return datetime.utcnow() - timedelta(minutes=5)
def ten_minutes_into_past():
return datetime.now(timezone.utc) - timedelta(minutes=10)
def five_minutes_into_past():
return datetime.now(timezone.utc) - timedelta(minutes=5)
class NearFuture(datetime):
@classmethod
def now(cls, tz=None):
return datetime.now(tz) + timedelta(hours=13)
return datetime.now(tz) + timedelta(minutes=13)
@classmethod
def utcnow(cls):
return datetime.utcnow() + timedelta(hours=13)
return datetime.utcnow() + timedelta(minutes=13)
def read_json(file_path):
@ -69,10 +67,6 @@ def generate_backup_query(query_array):
return "query TestBackup {\n backup {" + "\n".join(query_array) + "}\n}"
def generate_service_query(query_array):
return "query TestService {\n services {" + "\n".join(query_array) + "}\n}"
def mnemonic_to_hex(mnemonic):
return Mnemonic(language="english").to_entropy(mnemonic).hex()
@ -81,12 +75,3 @@ def assert_recovery_recent(time_generated: str):
assert datetime.fromisoformat(time_generated) - timedelta(seconds=5) < datetime.now(
timezone.utc
)
def assert_job_errored(job: Job):
assert job is not None
assert job.status == JobStatus.ERROR
# consider adding a useful error message to an errored-out job
assert job.error is not None
assert job.error != ""

View File

@ -3,28 +3,24 @@
# pylint: disable=unused-argument
import os
import pytest
import datetime
import subprocess
from os import path
from os import makedirs
from typing import Generator
from fastapi.testclient import TestClient
import os.path as path
import datetime
from selfprivacy_api.models.tokens.token import Token
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.services as services
from selfprivacy_api.services import get_service_by_id, Service
from selfprivacy_api.services.test_service import DummyService
from selfprivacy_api.repositories.tokens.json_tokens_repository import (
JsonTokensRepository,
)
from selfprivacy_api.repositories.tokens.redis_tokens_repository import (
RedisTokensRepository,
)
from tests.common import read_json
EMPTY_TOKENS_JSON = ' {"tokens": []}'
TESTFILE_BODY = "testytest!"
TESTFILE_2_BODY = "testissimo!"
TOKENS_FILE_CONTENTS = {
"tokens": [
@ -41,19 +37,6 @@ TOKENS_FILE_CONTENTS = {
]
}
TOKENS = [
Token(
token="TEST_TOKEN",
device_name="test_token",
created_at=datetime.datetime(2022, 1, 14, 8, 31, 10, 789314),
),
Token(
token="TEST_TOKEN2",
device_name="test_token2",
created_at=datetime.datetime(2022, 1, 14, 8, 31, 10, 789314),
),
]
DEVICE_WE_AUTH_TESTS_WITH = TOKENS_FILE_CONTENTS["tokens"][0]
@ -65,6 +48,25 @@ def global_data_dir():
return path.join(path.dirname(__file__), "data")
@pytest.fixture
def empty_tokens(mocker, tmpdir):
tokenfile = tmpdir / "empty_tokens.json"
with open(tokenfile, "w") as file:
file.write(EMPTY_TOKENS_JSON)
mocker.patch("selfprivacy_api.utils.TOKENS_FILE", new=tokenfile)
assert read_json(tokenfile)["tokens"] == []
return tmpdir
@pytest.fixture
def empty_json_repo(empty_tokens):
repo = JsonTokensRepository()
for token in repo.get_tokens():
repo.delete_token(token)
assert repo.get_tokens() == []
return repo
@pytest.fixture
def empty_redis_repo():
repo = RedisTokensRepository()
@ -74,14 +76,25 @@ def empty_redis_repo():
@pytest.fixture
def redis_repo_with_tokens():
repo = RedisTokensRepository()
repo.reset()
for token in TOKENS:
repo._store_token(token)
assert sorted(repo.get_tokens(), key=lambda x: x.token) == sorted(
TOKENS, key=lambda x: x.token
)
def tokens_file(empty_redis_repo, tmpdir):
"""A state with tokens"""
repo = empty_redis_repo
for token in TOKENS_FILE_CONTENTS["tokens"]:
repo._store_token(
Token(
token=token["token"],
device_name=token["name"],
created_at=token["date"],
)
)
return repo
@pytest.fixture
def jobs_file(mocker, shared_datadir):
"""Mock tokens file."""
mock = mocker.patch("selfprivacy_api.utils.JOBS_FILE", shared_datadir / "jobs.json")
return mock
@pytest.fixture
@ -99,14 +112,23 @@ def generic_userdata(mocker, tmpdir):
@pytest.fixture
def client(redis_repo_with_tokens):
def huey_database(mocker, shared_datadir):
"""Mock huey database."""
mock = mocker.patch(
"selfprivacy_api.utils.huey.HUEY_DATABASE", shared_datadir / "huey.db"
)
return mock
@pytest.fixture
def client(tokens_file, huey_database, jobs_file):
from selfprivacy_api.app import app
return TestClient(app)
@pytest.fixture
def authorized_client(redis_repo_with_tokens):
def authorized_client(tokens_file, huey_database, jobs_file):
"""Authorized test client fixture."""
from selfprivacy_api.app import app
@ -118,97 +140,10 @@ def authorized_client(redis_repo_with_tokens):
@pytest.fixture
def wrong_auth_client(redis_repo_with_tokens):
def wrong_auth_client(tokens_file, huey_database, jobs_file):
"""Wrong token test client fixture."""
from selfprivacy_api.app import app
client = TestClient(app)
client.headers.update({"Authorization": "Bearer WRONG_TOKEN"})
return client
@pytest.fixture()
def volume_folders(tmpdir, mocker):
volumes_dir = path.join(tmpdir, "volumes")
makedirs(volumes_dir)
volumenames = ["sda1", "sda2"]
for d in volumenames:
service_dir = path.join(volumes_dir, d)
makedirs(service_dir)
mock = mocker.patch("selfprivacy_api.services.owned_path.VOLUMES_PATH", volumes_dir)
@pytest.fixture()
def raw_dummy_service(tmpdir) -> DummyService:
dirnames = ["test_service", "also_test_service"]
service_dirs = []
for d in dirnames:
service_dir = path.join(tmpdir, d)
makedirs(service_dir)
service_dirs.append(service_dir)
testfile_path_1 = path.join(service_dirs[0], "testfile.txt")
with open(testfile_path_1, "w") as file:
file.write(TESTFILE_BODY)
testfile_path_2 = path.join(service_dirs[1], "testfile2.txt")
with open(testfile_path_2, "w") as file:
file.write(TESTFILE_2_BODY)
# we need this to not change get_folders() much
class TestDummyService(DummyService, folders=service_dirs):
pass
service = TestDummyService()
# assert pickle.dumps(service) is not None
return service
def ensure_user_exists(user: str):
try:
output = subprocess.check_output(
["useradd", "-U", user], stderr=subprocess.PIPE, shell=False
)
except subprocess.CalledProcessError as error:
if b"already exists" not in error.stderr:
raise error
try:
output = subprocess.check_output(
["useradd", user], stderr=subprocess.PIPE, shell=False
)
except subprocess.CalledProcessError as error:
assert b"already exists" in error.stderr
return
raise ValueError("could not create user", user)
@pytest.fixture()
def dummy_service(
tmpdir, raw_dummy_service, generic_userdata
) -> Generator[Service, None, None]:
service = raw_dummy_service
user = service.get_user()
# TODO: use create_user from users actions. But it will need NIXOS to be there
# and react to our changes to files.
# from selfprivacy_api.actions.users import create_user
# create_user(user, "yay, it is me")
ensure_user_exists(user)
# register our service
services.services.append(service)
huey.immediate = True
assert huey.immediate is True
assert get_service_by_id(service.get_id()) is not None
service.enable()
yield service
# Cleanup because apparently it matters wrt tasks
# Some tests may remove it from the list intentionally, this is fine
if service in services.services:
services.services.remove(service)

1
tests/data/jobs.json Normal file
View File

@ -0,0 +1 @@
{}

14
tests/data/tokens.json Normal file
View File

@ -0,0 +1,14 @@
{
"tokens": [
{
"token": "TEST_TOKEN",
"name": "test_token",
"date": "2022-01-14 08:31:10.789314"
},
{
"token": "TEST_TOKEN2",
"name": "test_token2",
"date": "2022-01-14 08:31:10.789314"
}
]
}

View File

@ -1,81 +1,60 @@
{
"dns": {
"provider": "CLOUDFLARE",
"useStagingACME": false
"api": {
"token": "TEST_TOKEN",
"enableSwagger": false
},
"server": {
"provider": "HETZNER"
"bitwarden": {
"enable": true
},
"domain": "test-domain.tld",
"databasePassword": "PASSWORD",
"domain": "test.tld",
"hashedMasterPassword": "HASHED_PASSWORD",
"hostname": "test-instance",
"timezone": "Etc/UTC",
"username": "tester",
"useBinds": true,
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"users": [
{
"username": "user1",
"hashedPassword": "HASHED_PASSWORD_1",
"sshKeys": ["ssh-rsa KEY user1@pc"]
},
{
"username": "user2",
"hashedPassword": "HASHED_PASSWORD_2",
"sshKeys": ["ssh-rsa KEY user2@pc"]
},
{
"username": "user3",
"hashedPassword": "HASHED_PASSWORD_3",
"sshKeys": ["ssh-rsa KEY user3@pc"]
}
],
"autoUpgrade": {
"enable": true,
"allowReboot": true
"nextcloud": {
"adminPassword": "ADMIN",
"databasePassword": "ADMIN",
"enable": true
},
"modules": {
"bitwarden": {
"enable": true,
"location": "sdb"
},
"gitea": {
"enable": true,
"location": "sdb"
},
"jitsi-meet": {
"enable": true
},
"nextcloud": {
"enable": true,
"location": "sdb"
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true,
"location": "sdb"
},
"simple-nixos-mailserver": {
"enable": true,
"location": "sdb"
}
},
"volumes": [
{
"device": "/dev/sdb",
"mountPoint": "/volumes/sdb",
"fsType": "ext4"
}
],
"resticPassword": "PASS",
"ssh": {
"enable": true,
"passwordAuthentication": true,
"rootKeys": [
"ssh-ed25519 KEY test@pc"
]
},
"username": "tester",
"gitea": {
"enable": true
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true
},
"jitsi": {
"enable": true
},
"autoUpgrade": {
"enable": true,
"allowReboot": true
},
"timezone": "Europe/Moscow",
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"dns": {
"provider": "CLOUDFLARE",
"apiKey": "TOKEN"
},
"server": {
"provider": "HETZNER"
},
"backup": {
"provider": "BACKBLAZE",
"accountId": "ID",
"accountKey": "KEY",
"bucket": "selfprivacy"
}
}

View File

@ -1,567 +0,0 @@
import pytest
from copy import copy
from datetime import datetime, timezone, timedelta
from selfprivacy_api.jobs import Jobs
from selfprivacy_api.services import Service, get_all_services
from selfprivacy_api.graphql.common_types.backup import (
BackupReason,
AutobackupQuotas,
)
from selfprivacy_api.backup import Backups, Snapshot
from selfprivacy_api.backup.tasks import (
prune_autobackup_snapshots,
do_autobackup,
)
from selfprivacy_api.backup.jobs import autobackup_job_type
from tests.test_backup import backups, assert_job_finished
from tests.test_graphql.test_services import only_dummy_service
def backuppable_services() -> list[Service]:
return [service for service in get_all_services() if service.can_be_backed_up()]
def dummy_snapshot(date: datetime):
return Snapshot(
id=str(hash(date)),
service_name="someservice",
created_at=date,
reason=BackupReason.EXPLICIT,
)
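An editorial note on the helper above, not part of this diff: the pruning tests further down compare snapshot lists by value against freshly constructed dummy_snapshot(...) objects. That only works because the helper is deterministic for a given date; assuming Snapshot is the pydantic model imported above, equality is field by field:
# dummy_snapshot() is deterministic for a given datetime within one run,
# so the value comparisons in the pruning assertions below are well-defined.
assert dummy_snapshot(datetime(2023, 1, 1)) == dummy_snapshot(datetime(2023, 1, 1))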
def test_no_default_autobackup(backups, dummy_service):
now = datetime.now(timezone.utc)
assert not Backups.is_time_to_backup_service(dummy_service, now)
assert not Backups.is_time_to_backup(now)
# --------------------- Timing -------------------------
def test_set_autobackup_period(backups):
assert Backups.autobackup_period_minutes() is None
Backups.set_autobackup_period_minutes(2)
assert Backups.autobackup_period_minutes() == 2
Backups.disable_all_autobackup()
assert Backups.autobackup_period_minutes() is None
Backups.set_autobackup_period_minutes(3)
assert Backups.autobackup_period_minutes() == 3
Backups.set_autobackup_period_minutes(0)
assert Backups.autobackup_period_minutes() is None
Backups.set_autobackup_period_minutes(3)
assert Backups.autobackup_period_minutes() == 3
Backups.set_autobackup_period_minutes(-1)
assert Backups.autobackup_period_minutes() is None
def test_autobackup_taskbody(backups, only_dummy_service):
# We cannot test the timed task itself, but we reduced it
# to one line, and we test this line here
dummy_service = only_dummy_service
now = datetime.now(timezone.utc)
backup_period = 13 # minutes
assert Backups.get_all_snapshots() == []
assert_job_finished(autobackup_job_type(), count=0)
Backups.set_autobackup_period_minutes(backup_period)
assert Backups.is_time_to_backup_service(dummy_service, now)
assert Backups.is_time_to_backup(now)
assert dummy_service in Backups.services_to_back_up(now)
assert len(Backups.services_to_back_up(now)) == 1
do_autobackup()
snapshots = Backups.get_all_snapshots()
assert len(snapshots) == 1
assert snapshots[0].service_name == dummy_service.get_id()
assert snapshots[0].reason == BackupReason.AUTO
assert_job_finished(autobackup_job_type(), count=1)
def test_autobackup_timer_periods(backups, dummy_service):
now = datetime.now(timezone.utc)
backup_period = 13 # minutes
assert not Backups.is_time_to_backup_service(dummy_service, now)
assert not Backups.is_time_to_backup(now)
Backups.set_autobackup_period_minutes(backup_period)
assert Backups.is_time_to_backup_service(dummy_service, now)
assert Backups.is_time_to_backup(now)
Backups.set_autobackup_period_minutes(0)
assert not Backups.is_time_to_backup_service(dummy_service, now)
assert not Backups.is_time_to_backup(now)
def test_autobackup_timer_enabling(backups, dummy_service):
now = datetime.now(timezone.utc)
backup_period = 13 # minutes
dummy_service.set_backuppable(False)
Backups.set_autobackup_period_minutes(backup_period)
assert Backups.is_time_to_backup(
now
) # there are other services too, not just our dummy
# a non-backuppable service stays non-backuppable even if a period is set
assert not Backups.is_time_to_backup_service(dummy_service, now)
dummy_service.set_backuppable(True)
assert dummy_service.can_be_backed_up()
assert Backups.is_time_to_backup_service(dummy_service, now)
Backups.disable_all_autobackup()
assert not Backups.is_time_to_backup_service(dummy_service, now)
assert not Backups.is_time_to_backup(now)
def test_autobackup_timing(backups, dummy_service):
backup_period = 13 # minutes
now = datetime.now(timezone.utc)
Backups.set_autobackup_period_minutes(backup_period)
assert Backups.is_time_to_backup_service(dummy_service, now)
assert Backups.is_time_to_backup(now)
Backups.back_up(dummy_service)
now = datetime.now(timezone.utc)
assert not Backups.is_time_to_backup_service(dummy_service, now)
past = datetime.now(timezone.utc) - timedelta(minutes=1)
assert not Backups.is_time_to_backup_service(dummy_service, past)
future = datetime.now(timezone.utc) + timedelta(minutes=backup_period + 2)
assert Backups.is_time_to_backup_service(dummy_service, future)
# --------------------- What to autobackup and what not to --------------------
def test_services_to_autobackup(backups, dummy_service):
backup_period = 13 # minutes
now = datetime.now(timezone.utc)
dummy_service.set_backuppable(False)
services = Backups.services_to_back_up(now)
assert len(services) == 0
dummy_service.set_backuppable(True)
services = Backups.services_to_back_up(now)
assert len(services) == 0
Backups.set_autobackup_period_minutes(backup_period)
services = Backups.services_to_back_up(now)
assert len(services) == len(backuppable_services())
assert dummy_service.get_id() in [
service.get_id() for service in backuppable_services()
]
def test_do_not_autobackup_disabled_services(backups, dummy_service):
now = datetime.now(timezone.utc)
Backups.set_autobackup_period_minutes(3)
assert Backups.is_time_to_backup_service(dummy_service, now) is True
dummy_service.disable()
assert Backups.is_time_to_backup_service(dummy_service, now) is False
def test_failed_autoback_prevents_more_autobackup(backups, dummy_service):
backup_period = 13 # minutes
now = datetime.now(timezone.utc)
Backups.set_autobackup_period_minutes(backup_period)
assert Backups.is_time_to_backup_service(dummy_service, now)
# Artificially create an errored-out backup job
dummy_service.set_backuppable(False)
with pytest.raises(ValueError):
Backups.back_up(dummy_service)
dummy_service.set_backuppable(True)
assert Backups.get_last_backed_up(dummy_service) is None
assert Backups.get_last_backup_error_time(dummy_service) is not None
assert Backups.is_time_to_backup_service(dummy_service, now) is False
# --------------------- Quotas and Pruning -------------------------
unlimited_quotas = AutobackupQuotas(
last=-1,
daily=-1,
weekly=-1,
monthly=-1,
yearly=-1,
)
zero_quotas = AutobackupQuotas(
last=0,
daily=0,
weekly=0,
monthly=0,
yearly=0,
)
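For readers skimming the quota tests below, a hedged summary of the semantics they exercise: 0 means "keep none of this bucket", while any negative value means "unlimited" and is normalized to -1 on write (see test_set_quotas). A minimal sketch with the same AutobackupQuotas type, using made-up example values:
# Keep only the 3 most recent autosnapshots plus 2 per day; -1 in any
# field would mean "unlimited" for that bucket.
example_quotas = AutobackupQuotas(last=3, daily=2, weekly=0, monthly=0, yearly=0)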
def test_get_empty_quotas(backups):
quotas = Backups.autobackup_quotas()
assert quotas is not None
assert quotas == unlimited_quotas
def test_set_quotas(backups):
quotas = AutobackupQuotas(
last=3,
daily=2343,
weekly=343,
monthly=0,
yearly=-34556,
)
Backups.set_autobackup_quotas(quotas)
assert Backups.autobackup_quotas() == AutobackupQuotas(
last=3,
daily=2343,
weekly=343,
monthly=0,
yearly=-1,
)
def test_set_zero_quotas(backups):
quotas = AutobackupQuotas(
last=0,
daily=0,
weekly=0,
monthly=0,
yearly=0,
)
Backups.set_autobackup_quotas(quotas)
assert Backups.autobackup_quotas() == zero_quotas
def test_set_unlimited_quotas(backups):
quotas = AutobackupQuotas(
last=-1,
daily=-1,
weekly=-1,
monthly=-1,
yearly=-1,
)
Backups.set_autobackup_quotas(quotas)
assert Backups.autobackup_quotas() == unlimited_quotas
def test_set_zero_quotas_after_unlimited(backups):
quotas = AutobackupQuotas(
last=-1,
daily=-1,
weekly=-1,
monthly=-1,
yearly=-1,
)
Backups.set_autobackup_quotas(quotas)
assert Backups.autobackup_quotas() == unlimited_quotas
quotas = AutobackupQuotas(
last=0,
daily=0,
weekly=0,
monthly=0,
yearly=0,
)
Backups.set_autobackup_quotas(quotas)
assert Backups.autobackup_quotas() == zero_quotas
def test_autobackup_snapshots_pruning(backups):
# Wednesday, fourth week
now = datetime(year=2023, month=1, day=25, hour=10)
snaps = [
dummy_snapshot(now),
dummy_snapshot(now - timedelta(minutes=5)),
dummy_snapshot(now - timedelta(hours=2)),
dummy_snapshot(now - timedelta(hours=5)),
dummy_snapshot(now - timedelta(days=1)),
dummy_snapshot(now - timedelta(days=1, hours=2)),
dummy_snapshot(now - timedelta(days=1, hours=3)),
dummy_snapshot(now - timedelta(days=2)),
dummy_snapshot(now - timedelta(days=7)),
dummy_snapshot(now - timedelta(days=12)),
dummy_snapshot(now - timedelta(days=23)),
dummy_snapshot(now - timedelta(days=28)),
dummy_snapshot(now - timedelta(days=32)),
dummy_snapshot(now - timedelta(days=47)),
dummy_snapshot(now - timedelta(days=64)),
dummy_snapshot(now - timedelta(days=84)),
dummy_snapshot(now - timedelta(days=104)),
dummy_snapshot(now - timedelta(days=365 * 2)),
]
old_len = len(snaps)
quotas = copy(unlimited_quotas)
Backups.set_autobackup_quotas(quotas)
assert Backups._prune_snaps_with_quotas(snaps) == snaps
quotas = copy(zero_quotas)
quotas.last = 2
quotas.daily = 2
Backups.set_autobackup_quotas(quotas)
snaps_to_keep = Backups._prune_snaps_with_quotas(snaps)
assert snaps_to_keep == [
dummy_snapshot(now),
dummy_snapshot(now - timedelta(minutes=5)),
# dummy_snapshot(now - timedelta(hours=2)),
# dummy_snapshot(now - timedelta(hours=5)),
dummy_snapshot(now - timedelta(days=1)),
# dummy_snapshot(now - timedelta(days=1, hours=2)),
# dummy_snapshot(now - timedelta(days=1, hours=3)),
# dummy_snapshot(now - timedelta(days=2)),
# dummy_snapshot(now - timedelta(days=7)),
# dummy_snapshot(now - timedelta(days=12)),
# dummy_snapshot(now - timedelta(days=23)),
# dummy_snapshot(now - timedelta(days=28)),
# dummy_snapshot(now - timedelta(days=32)),
# dummy_snapshot(now - timedelta(days=47)),
# dummy_snapshot(now - timedelta(days=64)),
# dummy_snapshot(now - timedelta(days=84)),
# dummy_snapshot(now - timedelta(days=104)),
# dummy_snapshot(now - timedelta(days=365 * 2)),
]
# checking that this function does not mutate the argument
assert snaps != snaps_to_keep
assert len(snaps) == old_len
quotas = copy(zero_quotas)
quotas.weekly = 4
Backups.set_autobackup_quotas(quotas)
snaps_to_keep = Backups._prune_snaps_with_quotas(snaps)
assert snaps_to_keep == [
dummy_snapshot(now),
# dummy_snapshot(now - timedelta(minutes=5)),
# dummy_snapshot(now - timedelta(hours=2)),
# dummy_snapshot(now - timedelta(hours=5)),
# dummy_snapshot(now - timedelta(days=1)),
# dummy_snapshot(now - timedelta(days=1, hours=2)),
# dummy_snapshot(now - timedelta(days=1, hours=3)),
# dummy_snapshot(now - timedelta(days=2)),
dummy_snapshot(now - timedelta(days=7)),
dummy_snapshot(now - timedelta(days=12)),
dummy_snapshot(now - timedelta(days=23)),
# dummy_snapshot(now - timedelta(days=28)),
# dummy_snapshot(now - timedelta(days=32)),
# dummy_snapshot(now - timedelta(days=47)),
# dummy_snapshot(now - timedelta(days=64)),
# dummy_snapshot(now - timedelta(days=84)),
# dummy_snapshot(now - timedelta(days=104)),
# dummy_snapshot(now - timedelta(days=365 * 2)),
]
quotas = copy(zero_quotas)
quotas.monthly = 7
Backups.set_autobackup_quotas(quotas)
snaps_to_keep = Backups._prune_snaps_with_quotas(snaps)
assert snaps_to_keep == [
dummy_snapshot(now),
# dummy_snapshot(now - timedelta(minutes=5)),
# dummy_snapshot(now - timedelta(hours=2)),
# dummy_snapshot(now - timedelta(hours=5)),
# dummy_snapshot(now - timedelta(days=1)),
# dummy_snapshot(now - timedelta(days=1, hours=2)),
# dummy_snapshot(now - timedelta(days=1, hours=3)),
# dummy_snapshot(now - timedelta(days=2)),
# dummy_snapshot(now - timedelta(days=7)),
# dummy_snapshot(now - timedelta(days=12)),
# dummy_snapshot(now - timedelta(days=23)),
dummy_snapshot(now - timedelta(days=28)),
# dummy_snapshot(now - timedelta(days=32)),
# dummy_snapshot(now - timedelta(days=47)),
dummy_snapshot(now - timedelta(days=64)),
# dummy_snapshot(now - timedelta(days=84)),
dummy_snapshot(now - timedelta(days=104)),
dummy_snapshot(now - timedelta(days=365 * 2)),
]
def test_autobackup_snapshots_pruning_yearly(backups):
snaps = [
dummy_snapshot(datetime(year=2055, month=3, day=1)),
dummy_snapshot(datetime(year=2055, month=2, day=1)),
dummy_snapshot(datetime(year=2023, month=4, day=1)),
dummy_snapshot(datetime(year=2023, month=3, day=1)),
dummy_snapshot(datetime(year=2023, month=2, day=1)),
dummy_snapshot(datetime(year=2021, month=2, day=1)),
]
quotas = copy(zero_quotas)
quotas.yearly = 2
Backups.set_autobackup_quotas(quotas)
snaps_to_keep = Backups._prune_snaps_with_quotas(snaps)
assert snaps_to_keep == [
dummy_snapshot(datetime(year=2055, month=3, day=1)),
dummy_snapshot(datetime(year=2023, month=4, day=1)),
]
def test_autobackup_snapshots_pruning_bottleneck(backups):
now = datetime(year=2023, month=1, day=25, hour=10)
snaps = [
dummy_snapshot(now),
dummy_snapshot(now - timedelta(minutes=5)),
dummy_snapshot(now - timedelta(hours=2)),
dummy_snapshot(now - timedelta(hours=3)),
dummy_snapshot(now - timedelta(hours=4)),
]
yearly_quota = copy(zero_quotas)
yearly_quota.yearly = 2
monthly_quota = copy(zero_quotas)
monthly_quota.monthly = 2
weekly_quota = copy(zero_quotas)
weekly_quota.weekly = 2
daily_quota = copy(zero_quotas)
daily_quota.daily = 2
last_quota = copy(zero_quotas)
last_quota.last = 1
last_quota.yearly = 2
for quota in [last_quota, yearly_quota, monthly_quota, weekly_quota, daily_quota]:
print(quota)
Backups.set_autobackup_quotas(quota)
snaps_to_keep = Backups._prune_snaps_with_quotas(snaps)
assert snaps_to_keep == [
dummy_snapshot(now),
# If there is a vacant quota, we should keep the last snapshot even if it doesn't fit
dummy_snapshot(now - timedelta(hours=4)),
]
def test_autobackup_snapshots_pruning_edgeweek(backups):
# jan 1 2023 is Sunday
snaps = [
dummy_snapshot(datetime(year=2023, month=1, day=6)),
dummy_snapshot(datetime(year=2023, month=1, day=1)),
dummy_snapshot(datetime(year=2022, month=12, day=31)),
dummy_snapshot(datetime(year=2022, month=12, day=30)),
]
quotas = copy(zero_quotas)
quotas.weekly = 2
Backups.set_autobackup_quotas(quotas)
snaps_to_keep = Backups._prune_snaps_with_quotas(snaps)
assert snaps_to_keep == [
dummy_snapshot(datetime(year=2023, month=1, day=6)),
dummy_snapshot(datetime(year=2023, month=1, day=1)),
]
def test_autobackup_snapshots_pruning_big_gap(backups):
snaps = [
dummy_snapshot(datetime(year=2023, month=1, day=6)),
dummy_snapshot(datetime(year=2023, month=1, day=2)),
dummy_snapshot(datetime(year=2022, month=10, day=31)),
dummy_snapshot(datetime(year=2022, month=10, day=30)),
]
quotas = copy(zero_quotas)
quotas.weekly = 2
Backups.set_autobackup_quotas(quotas)
snaps_to_keep = Backups._prune_snaps_with_quotas(snaps)
assert snaps_to_keep == [
dummy_snapshot(datetime(year=2023, month=1, day=6)),
dummy_snapshot(datetime(year=2022, month=10, day=31)),
]
def test_quotas_exceeded_with_too_many_autobackups(backups, dummy_service):
assert Backups.autobackup_quotas()
quota = copy(zero_quotas)
quota.last = 2
Backups.set_autobackup_quotas(quota)
assert Backups.autobackup_quotas().last == 2
snap = Backups.back_up(dummy_service, BackupReason.AUTO)
assert len(Backups.get_snapshots(dummy_service)) == 1
snap2 = Backups.back_up(dummy_service, BackupReason.AUTO)
assert len(Backups.get_snapshots(dummy_service)) == 2
snap3 = Backups.back_up(dummy_service, BackupReason.AUTO)
assert len(Backups.get_snapshots(dummy_service)) == 2
snaps = Backups.get_snapshots(dummy_service)
assert snap2 in snaps
assert snap3 in snaps
assert snap not in snaps
quota.last = -1
Backups.set_autobackup_quotas(quota)
snap4 = Backups.back_up(dummy_service, BackupReason.AUTO)
snaps = Backups.get_snapshots(dummy_service)
assert len(snaps) == 3
assert snap4 in snaps
# Retroactivity
quota.last = 1
Backups.set_autobackup_quotas(quota)
job = Jobs.add("trimming", "test.autobackup_trimming", "trimming the snaps!")
handle = prune_autobackup_snapshots(job)
handle(blocking=True)
snaps = Backups.get_snapshots(dummy_service)
assert len(snaps) == 1
snap5 = Backups.back_up(dummy_service, BackupReason.AUTO)
snaps = Backups.get_snapshots(dummy_service)
assert len(snaps) == 1
assert snap5 in snaps
# Explicit snaps are not affected
snap6 = Backups.back_up(dummy_service, BackupReason.EXPLICIT)
snaps = Backups.get_snapshots(dummy_service)
assert len(snaps) == 2
assert snap5 in snaps
assert snap6 in snaps

View File

@ -1,793 +0,0 @@
import pytest
import os
import os.path as path
from os import remove
from os import listdir
from os import urandom
from datetime import datetime, timedelta, timezone
import tempfile
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.services.service import ServiceStatus
from selfprivacy_api.graphql.queries.providers import BackupProvider as ProviderEnum
from selfprivacy_api.graphql.common_types.backup import (
RestoreStrategy,
BackupReason,
)
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.jobs import Job, Jobs, JobStatus
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup import Backups, BACKUP_PROVIDER_ENVS
import selfprivacy_api.backup.providers as providers
from selfprivacy_api.backup.providers import AbstractBackupProvider
from selfprivacy_api.backup.providers.backblaze import Backblaze
from selfprivacy_api.backup.providers.none import NoBackups
from selfprivacy_api.backup.providers import get_kind
from selfprivacy_api.backup.util import sync
from selfprivacy_api.backup.tasks import (
start_backup,
restore_snapshot,
reload_snapshot_cache,
)
from selfprivacy_api.backup.storage import Storage
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from selfprivacy_api.backup.jobs import get_backup_fail
from tests.common import assert_job_errored
REPO_NAME = "test_backup"
REPOFILE_NAME = "totallyunrelated"
def prepare_localfile_backups(temp_dir):
test_repo_path = path.join(temp_dir, REPOFILE_NAME)
assert not path.exists(test_repo_path)
Backups.set_localfile_repo(test_repo_path)
@pytest.fixture(scope="function")
def backups_local(tmpdir):
Backups.reset()
prepare_localfile_backups(tmpdir)
Jobs.reset()
Backups.init_repo()
@pytest.fixture(scope="function")
def backups(tmpdir):
"""
For those tests that are supposed to pass with
both local and cloud repos
"""
# Sometimes this is False; the reason is not yet understood.
huey.immediate = True
assert huey.immediate is True
Backups.reset()
if BACKUP_PROVIDER_ENVS["kind"] in os.environ.keys():
Backups.set_provider_from_envs()
else:
prepare_localfile_backups(tmpdir)
Jobs.reset()
Backups.init_repo()
assert Backups.provider().location == str(tmpdir) + "/" + REPOFILE_NAME
yield
Backups.erase_repo()
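A side note on the fixture above: when the BACKUP_PROVIDER_ENVS variables are present, the whole suite runs against a real provider instead of a local repository. A hedged sketch of setting that up programmatically, mirroring the values used in test_setting_from_envs further down (the concrete variable names come from the BACKUP_PROVIDER_ENVS mapping and are not spelled out here):
import os
from selfprivacy_api.backup import BACKUP_PROVIDER_ENVS

# Illustrative values only; with these exported before the fixtures run,
# the backups fixture above takes the Backups.set_provider_from_envs() branch.
os.environ[BACKUP_PROVIDER_ENVS["kind"]] = "BACKBLAZE"
os.environ[BACKUP_PROVIDER_ENVS["login"]] = "ID"
os.environ[BACKUP_PROVIDER_ENVS["key"]] = "KEY"
os.environ[BACKUP_PROVIDER_ENVS["location"]] = "selfprivacy"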
@pytest.fixture()
def memory_backup() -> AbstractBackupProvider:
ProviderClass = providers.get_provider(BackupProvider.MEMORY)
assert ProviderClass is not None
memory_provider = ProviderClass(login="", key="")
assert memory_provider is not None
return memory_provider
@pytest.fixture()
def file_backup(tmpdir) -> AbstractBackupProvider:
test_repo_path = path.join(tmpdir, "test_repo")
ProviderClass = providers.get_provider(BackupProvider.FILE)
assert ProviderClass is not None
provider = ProviderClass(location=test_repo_path)
assert provider is not None
return provider
def test_reset_sets_to_none1():
Backups.reset()
provider = Backups.provider()
assert provider is not None
assert isinstance(provider, NoBackups)
def test_reset_sets_to_none2(backups):
# now with something set up first^^^
Backups.reset()
provider = Backups.provider()
assert provider is not None
assert isinstance(provider, NoBackups)
def test_setting_from_envs(tmpdir):
Backups.reset()
environment_stash = {}
if BACKUP_PROVIDER_ENVS["kind"] in os.environ.keys():
# we are running under special envs, stash them before rewriting them
for key in BACKUP_PROVIDER_ENVS.values():
environment_stash[key] = os.environ[key]
os.environ[BACKUP_PROVIDER_ENVS["kind"]] = "BACKBLAZE"
os.environ[BACKUP_PROVIDER_ENVS["login"]] = "ID"
os.environ[BACKUP_PROVIDER_ENVS["key"]] = "KEY"
os.environ[BACKUP_PROVIDER_ENVS["location"]] = "selfprivacy"
Backups.set_provider_from_envs()
provider = Backups.provider()
assert provider is not None
assert isinstance(provider, Backblaze)
assert provider.login == "ID"
assert provider.key == "KEY"
assert provider.location == "selfprivacy"
assert provider.backupper.account == "ID"
assert provider.backupper.key == "KEY"
if environment_stash != {}:
for key in BACKUP_PROVIDER_ENVS.values():
os.environ[key] = environment_stash[key]
else:
for key in BACKUP_PROVIDER_ENVS.values():
del os.environ[key]
def test_select_backend():
provider = providers.get_provider(BackupProvider.BACKBLAZE)
assert provider is not None
assert provider == Backblaze
def test_file_backend_init(file_backup):
file_backup.backupper.init()
def test_reinit_after_purge(backups):
assert Backups.is_initted() is True
Backups.erase_repo()
assert Backups.is_initted() is False
with pytest.raises(ValueError):
Backups.force_snapshot_cache_reload()
Backups.init_repo()
assert Backups.is_initted() is True
assert len(Backups.get_all_snapshots()) == 0
def test_backup_service(dummy_service, backups):
id = dummy_service.get_id()
assert_job_finished(f"services.{id}.backup", count=0)
assert Backups.get_last_backed_up(dummy_service) is None
Backups.back_up(dummy_service)
now = datetime.now(timezone.utc)
date = Backups.get_last_backed_up(dummy_service)
assert date is not None
assert now > date
assert now - date < timedelta(minutes=1)
assert_job_finished(f"services.{id}.backup", count=1)
def all_job_text(job: Job) -> str:
# Use when we update to pydantic 2.xxx
# return Job.model_dump_json()
result = ""
if job.status_text is not None:
result += job.status_text
if job.description is not None:
result += job.description
if job.error is not None:
result += job.error
return result
def test_error_censoring_encryptionkey(dummy_service, backups):
# Discard our key to inject a failure
old_key = LocalBackupSecret.get()
LocalBackupSecret.reset()
new_key = LocalBackupSecret.get()
with pytest.raises(ValueError):
# Should fail without correct key
Backups.back_up(dummy_service)
job = get_backup_fail(dummy_service)
assert_job_errored(job)
job_text = all_job_text(job)
assert old_key not in job_text
assert new_key not in job_text
# local backups do not have login key
# assert Backups.provider().key not in job_text
assert "CENSORED" in job_text
def test_error_censoring_loginkey(dummy_service, backups, fp):
# We do not want to screw up our teardown
old_provider = Backups.provider()
secret = "aSecretNYA"
Backups.set_provider(
ProviderEnum.BACKBLAZE, login="meow", key=secret, location="moon"
)
assert Backups.provider().key == secret
# We could call the real Backblaze here, but that would not be very privacy-friendly.
fp.allow_unregistered(True)
fp.register(
["restic", fp.any()],
returncode=1,
stdout="only real cats are allowed",
# We do not want to suddenly call the real Backblaze even if the code changes
occurrences=100,
)
with pytest.raises(ValueError):
Backups.back_up(dummy_service)
job = get_backup_fail(dummy_service)
assert_job_errored(job)
job_text = all_job_text(job)
assert secret not in job_text
assert job_text.count("CENSORED") == 2
# We do not want to screw up our teardown
Storage.store_provider(old_provider)
def test_no_repo(memory_backup):
with pytest.raises(ValueError):
assert memory_backup.backupper.get_snapshots() == []
def test_one_snapshot(backups, dummy_service):
Backups.back_up(dummy_service)
snaps = Backups.get_snapshots(dummy_service)
assert len(snaps) == 1
snap = snaps[0]
assert snap.service_name == dummy_service.get_id()
def test_backup_returns_snapshot(backups, dummy_service):
service_folders = dummy_service.get_folders()
provider = Backups.provider()
name = dummy_service.get_id()
snapshot = provider.backupper.start_backup(service_folders, name)
assert snapshot.id is not None
snapshots = provider.backupper.get_snapshots()
assert snapshots != []
assert len(snapshot.id) == len(snapshots[0].id)
assert Backups.get_snapshot_by_id(snapshot.id) is not None
assert snapshot.service_name == name
assert snapshot.created_at is not None
assert snapshot.reason == BackupReason.EXPLICIT
def test_backup_reasons(backups, dummy_service):
snap = Backups.back_up(dummy_service, BackupReason.AUTO)
assert snap.reason == BackupReason.AUTO
Backups.force_snapshot_cache_reload()
snaps = Backups.get_snapshots(dummy_service)
assert snaps[0].reason == BackupReason.AUTO
def folder_files(folder):
return [
path.join(folder, filename)
for filename in listdir(folder)
if filename is not None
]
def service_files(service):
result = []
for service_folder in service.get_folders():
result.extend(folder_files(service_folder))
return result
def test_restore(backups, dummy_service):
paths_to_nuke = service_files(dummy_service)
contents = []
for service_file in paths_to_nuke:
with open(service_file, "r") as file:
contents.append(file.read())
Backups.back_up(dummy_service)
snap = Backups.get_snapshots(dummy_service)[0]
assert snap is not None
for p in paths_to_nuke:
assert path.exists(p)
remove(p)
assert not path.exists(p)
Backups._restore_service_from_snapshot(dummy_service, snap.id)
for p, content in zip(paths_to_nuke, contents):
assert path.exists(p)
with open(p, "r") as file:
assert file.read() == content
def test_sizing(backups, dummy_service):
Backups.back_up(dummy_service)
snap = Backups.get_snapshots(dummy_service)[0]
size = Backups.snapshot_restored_size(snap.id)
assert size is not None
assert size > 0
def test_init_tracking(backups, tmpdir):
assert Backups.is_initted() is True
Backups.reset()
assert Backups.is_initted() is False
separate_dir = tmpdir / "out_of_the_way"
prepare_localfile_backups(separate_dir)
Backups.init_repo()
assert Backups.is_initted() is True
def finished_jobs():
return [job for job in Jobs.get_jobs() if job.status is JobStatus.FINISHED]
def assert_job_finished(job_type, count):
finished_types = [job.type_id for job in finished_jobs()]
assert finished_types.count(job_type) == count
def assert_job_has_run(job_type):
job = [job for job in finished_jobs() if job.type_id == job_type][0]
assert JobStatus.RUNNING in Jobs.status_updates(job)
def job_progress_updates(job_type):
job = [job for job in finished_jobs() if job.type_id == job_type][0]
return Jobs.progress_updates(job)
def assert_job_had_progress(job_type):
assert len(job_progress_updates(job_type)) > 0
def make_large_file(path: str, bytes: int):
with open(path, "wb") as file:
file.write(urandom(bytes))
def test_snapshots_by_id(backups, dummy_service):
snap1 = Backups.back_up(dummy_service)
snap2 = Backups.back_up(dummy_service)
snap3 = Backups.back_up(dummy_service)
assert snap2.id is not None
assert snap2.id != ""
assert len(Backups.get_snapshots(dummy_service)) == 3
assert Backups.get_snapshot_by_id(snap2.id).id == snap2.id
@pytest.fixture(params=["instant_server_stop", "delayed_server_stop"])
def simulated_service_stopping_delay(request) -> float:
if request.param == "instant_server_stop":
return 0.0
else:
return 0.3
def test_backup_service_task(backups, dummy_service, simulated_service_stopping_delay):
dummy_service.set_delay(simulated_service_stopping_delay)
handle = start_backup(dummy_service.get_id())
handle(blocking=True)
snaps = Backups.get_snapshots(dummy_service)
assert len(snaps) == 1
id = dummy_service.get_id()
job_type_id = f"services.{id}.backup"
assert_job_finished(job_type_id, count=1)
assert_job_has_run(job_type_id)
assert_job_had_progress(job_type_id)
def test_forget_snapshot(backups, dummy_service):
snap1 = Backups.back_up(dummy_service)
snap2 = Backups.back_up(dummy_service)
assert len(Backups.get_snapshots(dummy_service)) == 2
Backups.forget_snapshot(snap2)
assert len(Backups.get_snapshots(dummy_service)) == 1
Backups.force_snapshot_cache_reload()
assert len(Backups.get_snapshots(dummy_service)) == 1
assert Backups.get_snapshots(dummy_service)[0].id == snap1.id
Backups.forget_snapshot(snap1)
assert len(Backups.get_snapshots(dummy_service)) == 0
def test_forget_nonexistent_snapshot(backups, dummy_service):
bogus = Snapshot(
id="gibberjibber",
service_name="nohoho",
created_at=datetime.now(timezone.utc),
reason=BackupReason.EXPLICIT,
)
with pytest.raises(ValueError):
Backups.forget_snapshot(bogus)
def test_backup_larger_file(backups, dummy_service):
dir = path.join(dummy_service.get_folders()[0], "LARGEFILE")
mega = 2**20
make_large_file(dir, 100 * mega)
handle = start_backup(dummy_service.get_id())
handle(blocking=True)
# Results will differ slightly between machines. If this test causes trouble on your machine, consider dropping it.
id = dummy_service.get_id()
job_type_id = f"services.{id}.backup"
assert_job_finished(job_type_id, count=1)
assert_job_has_run(job_type_id)
updates = job_progress_updates(job_type_id)
assert len(updates) > 3
assert updates[int((len(updates) - 1) / 2.0)] > 10
# clean up a bit
remove(dir)
@pytest.fixture(params=["verify", "inplace"])
def restore_strategy(request) -> RestoreStrategy:
if request.param == "verify":
return RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
else:
return RestoreStrategy.INPLACE
@pytest.fixture(params=["failed", "healthy"])
def failed(request) -> bool:
if request.param == "failed":
return True
return False
def test_restore_snapshot_task(
backups, dummy_service, restore_strategy, simulated_service_stopping_delay, failed
):
dummy_service.set_delay(simulated_service_stopping_delay)
if failed:
dummy_service.set_status(ServiceStatus.FAILED)
Backups.back_up(dummy_service)
snaps = Backups.get_snapshots(dummy_service)
assert len(snaps) == 1
paths_to_nuke = service_files(dummy_service)
contents = []
for service_file in paths_to_nuke:
with open(service_file, "r") as file:
contents.append(file.read())
for p in paths_to_nuke:
remove(p)
handle = restore_snapshot(snaps[0], restore_strategy)
handle(blocking=True)
for p, content in zip(paths_to_nuke, contents):
assert path.exists(p)
with open(p, "r") as file:
assert file.read() == content
snaps = Backups.get_snapshots(dummy_service)
if restore_strategy == RestoreStrategy.INPLACE:
assert len(snaps) == 2
reasons = [snap.reason for snap in snaps]
assert BackupReason.PRE_RESTORE in reasons
else:
assert len(snaps) == 1
def test_backup_unbackuppable(backups, dummy_service):
dummy_service.set_backuppable(False)
assert dummy_service.can_be_backed_up() is False
with pytest.raises(ValueError):
Backups.back_up(dummy_service)
# Storage
def test_snapshots_caching(backups, dummy_service):
Backups.back_up(dummy_service)
# We indirectly verify that these are Redis calls rather than shell calls
start = datetime.now()
for i in range(10):
snapshots = Backups.get_snapshots(dummy_service)
assert len(snapshots) == 1
assert datetime.now() - start < timedelta(seconds=0.5)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 1
snap_to_uncache = cached_snapshots[0]
Storage.delete_cached_snapshot(snap_to_uncache)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 0
# We do not assume that no snapshots means we need to reload the cache
snapshots = Backups.get_snapshots(dummy_service)
assert len(snapshots) == 0
# No cache reload happened
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 0
# Storage
def test_snapshot_cache_autoreloads(backups, dummy_service):
Backups.back_up(dummy_service)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 1
snap_to_uncache = cached_snapshots[0]
Storage.delete_cached_snapshot(snap_to_uncache)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 0
# When we create a snapshot we do reload cache
Backups.back_up(dummy_service)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 2
assert snap_to_uncache in cached_snapshots
Storage.delete_cached_snapshot(snap_to_uncache)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 1
# Deleting a snapshot that is missing from the cache is fine and triggers a cache reload
Backups.forget_snapshot(snap_to_uncache)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 1
assert snap_to_uncache not in cached_snapshots
def lowlevel_forget(snapshot_id):
Backups.provider().backupper.forget_snapshot(snapshot_id)
# Storage
def test_snapshots_cache_invalidation(backups, dummy_service):
Backups.back_up(dummy_service)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 1
Storage.invalidate_snapshot_storage()
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 0
Backups.force_snapshot_cache_reload()
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 1
snap = cached_snapshots[0]
lowlevel_forget(snap.id)
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 1
Backups.force_snapshot_cache_reload()
cached_snapshots = Storage.get_cached_snapshots()
assert len(cached_snapshots) == 0
# Storage
def test_init_tracking_caching(backups, raw_dummy_service):
assert Storage.has_init_mark() is True
Backups.reset()
assert Storage.has_init_mark() is False
Storage.mark_as_init()
assert Storage.has_init_mark() is True
assert Backups.is_initted() is True
# Storage
def test_init_tracking_caching2(backups, tmpdir):
assert Storage.has_init_mark() is True
Backups.reset()
assert Storage.has_init_mark() is False
separate_dir = tmpdir / "out_of_the_way"
prepare_localfile_backups(separate_dir)
assert Storage.has_init_mark() is False
Backups.init_repo()
assert Storage.has_init_mark() is True
# Storage
def test_provider_storage(backups):
test_login = "ID"
test_key = "KEY"
test_location = "selprivacy_bin"
old_provider = Backups.provider()
assert old_provider is not None
assert not isinstance(old_provider, Backblaze)
assert old_provider.login != test_login
assert old_provider.key != test_key
assert old_provider.location != test_location
test_provider = Backups._construct_provider(
kind=BackupProvider.BACKBLAZE, login="ID", key=test_key, location=test_location
)
assert isinstance(test_provider, Backblaze)
assert get_kind(test_provider) == "BACKBLAZE"
assert test_provider.login == test_login
assert test_provider.key == test_key
assert test_provider.location == test_location
Storage.store_provider(test_provider)
restored_provider_model = Storage.load_provider()
assert restored_provider_model.kind == "BACKBLAZE"
assert restored_provider_model.login == test_login
assert restored_provider_model.key == test_key
assert restored_provider_model.location == test_location
restored_provider = Backups._load_provider_redis()
assert isinstance(restored_provider, Backblaze)
assert restored_provider.login == test_login
assert restored_provider.key == test_key
assert restored_provider.location == test_location
# Revert our mess so that teardown works correctly
Storage.store_provider(old_provider)
def test_sync(dummy_service):
src = dummy_service.get_folders()[0]
dst = dummy_service.get_folders()[1]
old_files_src = set(listdir(src))
old_files_dst = set(listdir(dst))
assert old_files_src != old_files_dst
sync(src, dst)
new_files_src = set(listdir(src))
new_files_dst = set(listdir(dst))
assert new_files_src == old_files_src
assert new_files_dst == new_files_src
def test_sync_nonexistent_src(dummy_service):
src = "/var/lib/nonexistentFluffyBunniesOfUnix"
dst = dummy_service.get_folders()[1]
with pytest.raises(ValueError):
sync(src, dst)
def test_move_blocks_backups(backups, dummy_service, restore_strategy):
snap = Backups.back_up(dummy_service)
job = Jobs.add(
type_id=f"services.{dummy_service.get_id()}.move",
name="Move Dummy",
description=f"Moving Dummy data to the Rainbow Land",
status=JobStatus.RUNNING,
)
with pytest.raises(ValueError):
Backups.back_up(dummy_service)
with pytest.raises(ValueError):
Backups.restore_snapshot(snap, restore_strategy)
def test_double_lock_unlock(backups, dummy_service):
# Note that introducing stale locks is only safe for other tests if we erase the repo in between,
# which we do at the time of writing this test
Backups.provider().backupper.lock()
with pytest.raises(ValueError):
Backups.provider().backupper.lock()
Backups.provider().backupper.unlock()
Backups.provider().backupper.lock()
Backups.provider().backupper.unlock()
Backups.provider().backupper.unlock()
def test_operations_while_locked(backups, dummy_service):
# Stale lock prevention test.
# Consider moving it entirely to the backupper level, since that is where prevention lives.
# The Backups singleton is used here only so that we can run this against B2, S3 and so on,
# but that may not be necessary if restic treats all backends uniformly enough.
Backups.provider().backupper.lock()
snap = Backups.back_up(dummy_service)
assert snap is not None
Backups.provider().backupper.lock()
# Use the low-level backupper to make sure no caching interferes
assert Backups.provider().backupper.is_initted() is True
Backups.provider().backupper.lock()
assert Backups.snapshot_restored_size(snap.id) > 0
Backups.provider().backupper.lock()
Backups.restore_snapshot(snap)
Backups.provider().backupper.lock()
Backups.forget_snapshot(snap)
Backups.provider().backupper.lock()
assert Backups.provider().backupper.get_snapshots() == []
# check that no locks were left
Backups.provider().backupper.lock()
Backups.provider().backupper.unlock()
# A paranoid check to weed out tempdir problems that are not caused by us
def test_tempfile():
with tempfile.TemporaryDirectory() as temp:
assert path.exists(temp)
assert not path.exists(temp)
# Storage
def test_cache_invalidaton_task(backups, dummy_service):
Backups.back_up(dummy_service)
assert len(Storage.get_cached_snapshots()) == 1
# Does not trigger resync
Storage.invalidate_snapshot_storage()
assert Storage.get_cached_snapshots() == []
reload_snapshot_cache()
assert len(Storage.get_cached_snapshots()) == 1

View File

@ -1,92 +0,0 @@
import pytest
from os import mkdir, rmdir
from os.path import join, exists
from tests.conftest import ensure_user_exists
from tests.test_graphql.test_services import mock_lsblk_devices
from selfprivacy_api.services.owned_path import Bind, BindError
from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.utils.waitloop import wait_until_true
BINDTESTS_USER = "binduser"
TESTFILE_CONTENTS = "testissimo"
TESTFILE_NAME = "testfile"
@pytest.fixture()
def bind_user():
ensure_user_exists(BINDTESTS_USER)
return BINDTESTS_USER
def prepare_test_bind(tmpdir, bind_user) -> Bind:
test_binding_name = "bindy_dir"
binding_path = join(tmpdir, test_binding_name)
drive = BlockDevices().get_block_device("sda2")
assert drive is not None
bind = Bind(
binding_path=binding_path, owner=bind_user, group=bind_user, drive=drive
)
source_dir = bind.location_at_volume()
mkdir(source_dir)
mkdir(binding_path)
testfile_path = join(source_dir, TESTFILE_NAME)
with open(testfile_path, "w") as file:
file.write(TESTFILE_CONTENTS)
return bind
def test_bind_unbind(volume_folders, tmpdir, bind_user, mock_lsblk_devices):
bind = prepare_test_bind(tmpdir, bind_user)
bind.ensure_ownership()
bind.validate()
testfile_path = join(bind.location_at_volume(), TESTFILE_NAME)
assert exists(testfile_path)
with open(testfile_path, "r") as file:
assert file.read() == TESTFILE_CONTENTS
bind.bind()
testfile_binding_path = join(bind.binding_path, TESTFILE_NAME)
assert exists(testfile_path)
with open(testfile_path, "r") as file:
assert file.read() == TESTFILE_CONTENTS
bind.unbind()
# wait_until_true(lambda : not exists(testfile_binding_path), timeout_sec=2)
assert not exists(testfile_binding_path)
assert exists(bind.binding_path)
def test_bind_nonexistent_target(volume_folders, tmpdir, bind_user, mock_lsblk_devices):
bind = prepare_test_bind(tmpdir, bind_user)
bind.ensure_ownership()
bind.validate()
rmdir(bind.binding_path)
with pytest.raises(BindError):
bind.bind()
def test_unbind_nonexistent_target(
volume_folders, tmpdir, bind_user, mock_lsblk_devices
):
bind = prepare_test_bind(tmpdir, bind_user)
bind.ensure_ownership()
bind.validate()
bind.bind()
bind.binding_path = "/bogus"
with pytest.raises(BindError):
bind.unbind()

View File

@ -67,7 +67,7 @@ def only_root_in_userdata(mocker, datadir):
read_json(datadir / "only_root.json")["volumes"][0]["mountPoint"]
== "/volumes/sda1"
)
assert read_json(datadir / "only_root.json")["volumes"][0]["fsType"] == "ext4"
assert read_json(datadir / "only_root.json")["volumes"][0]["filesystem"] == "ext4"
return datadir
@ -410,44 +410,38 @@ def lsblk_full_mock(mocker):
mock = mocker.patch(
"subprocess.check_output", autospec=True, return_value=FULL_LSBLK_OUTPUT
)
BlockDevices().update()
return mock
def test_get_block_devices(lsblk_full_mock, authorized_client):
block_devices = BlockDevices().get_block_devices()
assert len(block_devices) == 2
devices_by_name = {device.name: device for device in block_devices}
sda1 = devices_by_name["sda1"]
sdb = devices_by_name["sdb"]
assert sda1.name == "sda1"
assert sda1.path == "/dev/sda1"
assert sda1.fsavail == "4605702144"
assert sda1.fssize == "19814920192"
assert sda1.fstype == "ext4"
assert sda1.fsused == "14353719296"
assert sda1.mountpoints == ["/nix/store", "/"]
assert sda1.label is None
assert sda1.uuid == "ec80c004-baec-4a2c-851d-0e1807135511"
assert sda1.size == "20210236928"
assert sda1.model is None
assert sda1.serial is None
assert sda1.type == "part"
assert sdb.name == "sdb"
assert sdb.path == "/dev/sdb"
assert sdb.fsavail == "11888545792"
assert sdb.fssize == "12573614080"
assert sdb.fstype == "ext4"
assert sdb.fsused == "24047616"
assert sdb.mountpoints == ["/volumes/sdb"]
assert sdb.label is None
assert sdb.uuid == "fa9d0026-ee23-4047-b8b1-297ae16fa751"
assert sdb.size == "12884901888"
assert sdb.model == "Volume"
assert sdb.serial == "21378102"
assert sdb.type == "disk"
assert block_devices[0].name == "sda1"
assert block_devices[0].path == "/dev/sda1"
assert block_devices[0].fsavail == "4605702144"
assert block_devices[0].fssize == "19814920192"
assert block_devices[0].fstype == "ext4"
assert block_devices[0].fsused == "14353719296"
assert block_devices[0].mountpoints == ["/nix/store", "/"]
assert block_devices[0].label is None
assert block_devices[0].uuid == "ec80c004-baec-4a2c-851d-0e1807135511"
assert block_devices[0].size == "20210236928"
assert block_devices[0].model is None
assert block_devices[0].serial is None
assert block_devices[0].type == "part"
assert block_devices[1].name == "sdb"
assert block_devices[1].path == "/dev/sdb"
assert block_devices[1].fsavail == "11888545792"
assert block_devices[1].fssize == "12573614080"
assert block_devices[1].fstype == "ext4"
assert block_devices[1].fsused == "24047616"
assert block_devices[1].mountpoints == ["/volumes/sdb"]
assert block_devices[1].label is None
assert block_devices[1].uuid == "fa9d0026-ee23-4047-b8b1-297ae16fa751"
assert block_devices[1].size == "12884901888"
assert block_devices[1].model == "Volume"
assert block_devices[1].serial == "21378102"
assert block_devices[1].type == "disk"
def test_get_block_device(lsblk_full_mock, authorized_client):
@ -512,30 +506,3 @@ def test_get_root_block_device(lsblk_full_mock, authorized_client):
assert block_device.model is None
assert block_device.serial is None
assert block_device.type == "part"
# An unassuming sanity check; yes, this has failed before
def test_get_real_devices():
block_devices = BlockDevices().get_block_devices()
assert block_devices is not None
assert len(block_devices) > 0
# Unassuming sanity check
def test_get_real_root_device():
devices = BlockDevices().get_block_devices()
try:
block_device = BlockDevices().get_root_block_device()
except Exception as e:
raise Exception("cannot get root device:", e, "devices found:", devices)
assert block_device is not None
assert block_device.name is not None
assert block_device.name != ""
def test_get_real_root_device_raw(authorized_client):
block_device = BlockDevices().get_root_block_device()
assert block_device is not None
assert block_device.name is not None
assert block_device.name != ""

View File

@ -1,59 +1,59 @@
{
"dns": {
"provider": "CLOUDFLARE",
"useStagingACME": false
"api": {
"token": "TEST_TOKEN",
"enableSwagger": false
},
"server": {
"provider": "HETZNER"
"bitwarden": {
"enable": true
},
"domain": "test-domain.tld",
"databasePassword": "PASSWORD",
"domain": "test.tld",
"hashedMasterPassword": "HASHED_PASSWORD",
"hostname": "test-instance",
"timezone": "Etc/UTC",
"username": "tester",
"useBinds": true,
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"users": [],
"autoUpgrade": {
"enable": true,
"allowReboot": true
"nextcloud": {
"adminPassword": "ADMIN",
"databasePassword": "ADMIN",
"enable": true
},
"modules": {
"bitwarden": {
"enable": true,
"location": "sdb"
},
"gitea": {
"enable": true,
"location": "sdb"
},
"jitsi-meet": {
"enable": true
},
"nextcloud": {
"enable": true,
"location": "sdb"
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true,
"location": "sdb"
},
"simple-nixos-mailserver": {
"enable": true,
"location": "sdb"
}
},
"volumes": [],
"resticPassword": "PASS",
"ssh": {
"enable": true,
"passwordAuthentication": true,
"rootKeys": [
"ssh-ed25519 KEY test@pc"
]
}
},
"username": "tester",
"gitea": {
"enable": false
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true
},
"autoUpgrade": {
"enable": true,
"allowReboot": true
},
"timezone": "Europe/Moscow",
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"dns": {
"provider": "CLOUDFLARE",
"apiKey": "TOKEN"
},
"server": {
"provider": "HETZNER"
},
"backup": {
"provider": "BACKBLAZE",
"accountId": "ID",
"accountKey": "KEY",
"bucket": "selfprivacy"
},
"volumes": [
]
}

View File

@ -1,65 +1,64 @@
{
"dns": {
"provider": "CLOUDFLARE",
"useStagingACME": false
"api": {
"token": "TEST_TOKEN",
"enableSwagger": false
},
"server": {
"provider": "HETZNER"
"bitwarden": {
"enable": true
},
"domain": "test-domain.tld",
"databasePassword": "PASSWORD",
"domain": "test.tld",
"hashedMasterPassword": "HASHED_PASSWORD",
"hostname": "test-instance",
"timezone": "Etc/UTC",
"username": "tester",
"useBinds": true,
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"users": [],
"autoUpgrade": {
"enable": true,
"allowReboot": true
"nextcloud": {
"adminPassword": "ADMIN",
"databasePassword": "ADMIN",
"enable": true
},
"modules": {
"bitwarden": {
"enable": true,
"location": "sdb"
},
"gitea": {
"enable": true,
"location": "sdb"
},
"jitsi-meet": {
"enable": true
},
"nextcloud": {
"enable": true,
"location": "sdb"
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true,
"location": "sdb"
},
"simple-nixos-mailserver": {
"enable": true,
"location": "sdb"
}
},
"volumes": [
{
"device": "/dev/sda1",
"mountPoint": "/volumes/sda1",
"fsType": "ext4"
}
],
"resticPassword": "PASS",
"ssh": {
"enable": true,
"passwordAuthentication": true,
"rootKeys": [
"ssh-ed25519 KEY test@pc"
]
},
"username": "tester",
"gitea": {
"enable": false
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true
},
"autoUpgrade": {
"enable": true,
"allowReboot": true
},
"timezone": "Europe/Moscow",
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"volumes": [
{
"device": "/dev/sda1",
"mountPoint": "/volumes/sda1",
"filesystem": "ext4"
}
],
"dns": {
"provider": "CLOUDFLARE",
"apiKey": "TOKEN"
},
"server": {
"provider": "HETZNER"
},
"backup": {
"provider": "BACKBLAZE",
"accountId": "ID",
"accountKey": "KEY",
"bucket": "selfprivacy"
}
}

View File

@ -1,58 +1,57 @@
{
"dns": {
"provider": "CLOUDFLARE",
"useStagingACME": false
"api": {
"token": "TEST_TOKEN",
"enableSwagger": false
},
"server": {
"provider": "HETZNER"
"bitwarden": {
"enable": true
},
"domain": "test-domain.tld",
"databasePassword": "PASSWORD",
"domain": "test.tld",
"hashedMasterPassword": "HASHED_PASSWORD",
"hostname": "test-instance",
"timezone": "Etc/UTC",
"username": "tester",
"useBinds": true,
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"users": [],
"autoUpgrade": {
"enable": true,
"allowReboot": true
},
"modules": {
"bitwarden": {
"enable": true,
"location": "sdb"
},
"gitea": {
"enable": true,
"location": "sdb"
},
"jitsi-meet": {
"enable": true
},
"nextcloud": {
"enable": true,
"location": "sdb"
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true,
"location": "sdb"
},
"simple-nixos-mailserver": {
"enable": true,
"location": "sdb"
}
"nextcloud": {
"adminPassword": "ADMIN",
"databasePassword": "ADMIN",
"enable": true
},
"resticPassword": "PASS",
"ssh": {
"enable": true,
"passwordAuthentication": true,
"rootKeys": [
"ssh-ed25519 KEY test@pc"
]
},
"username": "tester",
"gitea": {
"enable": false
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true
},
"autoUpgrade": {
"enable": true,
"allowReboot": true
},
"timezone": "Europe/Moscow",
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"dns": {
"provider": "CLOUDFLARE",
"apiKey": "TOKEN"
},
"server": {
"provider": "HETZNER"
},
"backup": {
"provider": "BACKBLAZE",
"accountId": "ID",
"accountKey": "KEY",
"bucket": "selfprivacy"
}
}

View File

@ -1,5 +1,6 @@
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import json
import os
import pytest

View File

@ -1,52 +0,0 @@
import pytest
import os
from os import path
from tests.conftest import global_data_dir
from selfprivacy_api.utils import get_dkim_key, get_domain
###############################################################################
DKIM_FILE_CONTENT = b'selector._domainkey\tIN\tTXT\t( "v=DKIM1; k=rsa; "\n\t "p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDNn/IhEz1SxgHxxxI8vlPYC2dNueiLe1GC4SYz8uHimC8SDkMvAwm7rqi2SimbFgGB5nccCNOqCkrIqJTCB9vufqBnVKAjshHqpOr5hk4JJ1T/AGQKWinstmDbfTLPYTbU8ijZrwwGeqQLlnXR5nSN0GB9GazheA9zaPsT6PV+aQIDAQAB" ) ; ----- DKIM key selector for test-domain.tld\n'
@pytest.fixture
def dkim_file(mocker, tmpdir, generic_userdata):
domain = get_domain()
assert domain is not None
assert domain != ""
filename = domain + ".selector.txt"
dkim_path = path.join(tmpdir, filename)
with open(dkim_path, "wb") as file:
file.write(DKIM_FILE_CONTENT)
mocker.patch("selfprivacy_api.utils.DKIM_DIR", tmpdir)
return dkim_path
@pytest.fixture
def no_dkim_file(dkim_file):
os.remove(dkim_file)
assert path.exists(dkim_file) is False
return dkim_file
###############################################################################
def test_get_dkim_key(dkim_file):
"""Test DKIM key"""
dkim_key = get_dkim_key("test-domain.tld")
assert (
dkim_key
== "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDNn/IhEz1SxgHxxxI8vlPYC2dNueiLe1GC4SYz8uHimC8SDkMvAwm7rqi2SimbFgGB5nccCNOqCkrIqJTCB9vufqBnVKAjshHqpOr5hk4JJ1T/AGQKWinstmDbfTLPYTbU8ijZrwwGeqQLlnXR5nSN0GB9GazheA9zaPsT6PV+aQIDAQAB"
)
def test_no_dkim_key(no_dkim_file):
"""Test no DKIM key"""
dkim_key = get_dkim_key("test-domain.tld")
assert dkim_key is None

View File

@ -0,0 +1,89 @@
from tests.common import generate_api_query
from tests.conftest import TOKENS_FILE_CONTENTS, DEVICE_WE_AUTH_TESTS_WITH
ORIGINAL_DEVICES = TOKENS_FILE_CONTENTS["tokens"]
def assert_ok(response, request):
data = assert_data(response)
assert data[request]["success"] is True
assert data[request]["message"] is not None
assert data[request]["code"] == 200
def assert_errorcode(response, request, code):
data = assert_data(response)
assert data[request]["success"] is False
assert data[request]["message"] is not None
assert data[request]["code"] == code
def assert_empty(response):
assert response.status_code == 200
assert response.json().get("data") is None
def assert_data(response):
assert response.status_code == 200
data = response.json().get("data")
assert data is not None
assert "api" in data.keys()
return data["api"]
API_DEVICES_QUERY = """
devices {
creationDate
isCaller
name
}
"""
def request_devices(client):
return client.post(
"/graphql",
json={"query": generate_api_query([API_DEVICES_QUERY])},
)
def graphql_get_devices(client):
response = request_devices(client)
data = assert_data(response)
devices = data["devices"]
assert devices is not None
return devices
def set_client_token(client, token):
client.headers.update({"Authorization": "Bearer " + token})
def assert_token_valid(client, token):
set_client_token(client, token)
assert graphql_get_devices(client) is not None
def assert_same(graphql_devices, abstract_devices):
"""Orderless comparison"""
assert len(graphql_devices) == len(abstract_devices)
for original_device in abstract_devices:
assert original_device["name"] in [device["name"] for device in graphql_devices]
for device in graphql_devices:
if device["name"] == original_device["name"]:
assert device["creationDate"] == original_device["date"].isoformat()
def assert_original(client):
devices = graphql_get_devices(client)
assert_original_devices(devices)
def assert_original_devices(devices):
assert_same(devices, ORIGINAL_DEVICES)
for device in devices:
if device["name"] == DEVICE_WE_AUTH_TESTS_WITH["name"]:
assert device["isCaller"] is True
else:
assert device["isCaller"] is False

Some files were not shown because too many files have changed in this diff.