Merge branch 'master' into redis/connection-pool
continuous-integration/drone/push: Build is failing

pull/68/head
Inex Code 2023-10-24 19:25:36 +03:00
commit 829aca14be
88 changed files with 5897 additions and 1584 deletions

View File

@ -5,8 +5,11 @@ name: default
steps:
- name: Run Tests and Generate Coverage Report
commands:
- kill $(ps aux | grep '[r]edis-server 127.0.0.1:6389' | awk '{print $2}')
- kill $(ps aux | grep 'redis-server 127.0.0.1:6389' | awk '{print $2}') || true
- redis-server --bind 127.0.0.1 --port 6389 >/dev/null &
# We do not care about persistence on CI
- sleep 10
- redis-cli -h 127.0.0.1 -p 6389 config set stop-writes-on-bgsave-error no
- coverage run -m pytest -q
- coverage xml
- sonar-scanner -Dsonar.projectKey=SelfPrivacy-REST-API -Dsonar.sources=. -Dsonar.host.url=http://analyzer.lan:9000 -Dsonar.login="$SONARQUBE_TOKEN"
@ -26,3 +29,7 @@ steps:
node:
server: builder
trigger:
event:
- push
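The pipeline above brings up a disposable redis-server on 127.0.0.1:6389 for the test run and turns off stop-writes-on-bgsave-error so missing persistence never blocks writes. A minimal sketch, assuming the standard redis-py client, of a test talking to that instance:

import redis

# Connect to the throwaway CI instance started by the pipeline above.
r = redis.Redis(host="127.0.0.1", port=6389, decode_responses=True)
r.set("ci:healthcheck", "ok")
assert r.get("ci:healthcheck") == "ok"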

4
.flake8 Normal file
View File

@ -0,0 +1,4 @@
[flake8]
max-line-length = 80
select = C,E,F,W,B,B950
extend-ignore = E203, E501

1
.gitignore vendored
View File

@ -147,3 +147,4 @@ cython_debug/
# End of https://www.toptal.com/developers/gitignore/api/flask
*.db
*.rdb

View File

@ -1,3 +1,6 @@
[MASTER]
init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()))"
extension-pkg-whitelist=pydantic
[FORMAT]
max-line-length=88

64
api.nix
View File

@ -1,64 +0,0 @@
{ lib, python39Packages }:
with python39Packages;
buildPythonApplication {
pname = "selfprivacy-api";
version = "2.0.0";
propagatedBuildInputs = [
setuptools
portalocker
pytz
pytest
pytest-mock
pytest-datadir
huey
gevent
mnemonic
pydantic
typing-extensions
psutil
fastapi
uvicorn
(buildPythonPackage rec {
pname = "strawberry-graphql";
version = "0.123.0";
format = "pyproject";
patches = [
./strawberry-graphql.patch
];
propagatedBuildInputs = [
typing-extensions
python-multipart
python-dateutil
# flask
pydantic
pygments
poetry
# flask-cors
(buildPythonPackage rec {
pname = "graphql-core";
version = "3.2.0";
format = "setuptools";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-huKgvgCL/eGe94OI3opyWh2UKpGQykMcJKYIN5c4A84=";
};
checkInputs = [
pytest-asyncio
pytest-benchmark
pytestCheckHook
];
pythonImportsCheck = [
"graphql"
];
})
];
src = fetchPypi {
inherit pname version;
sha256 = "KsmZ5Xv8tUg6yBxieAEtvoKoRG60VS+iVGV0X6oCExo=";
};
})
];
src = ./.;
}

View File

@ -1,2 +0,0 @@
{ pkgs ? import <nixpkgs> {} }:
pkgs.callPackage ./api.nix {}

View File

@ -49,19 +49,6 @@ def set_ssh_settings(
data["ssh"]["passwordAuthentication"] = password_authentication
def add_root_ssh_key(public_key: str):
with WriteUserData() as data:
if "ssh" not in data:
data["ssh"] = {}
if "rootKeys" not in data["ssh"]:
data["ssh"]["rootKeys"] = []
# Return 409 if key already in array
for key in data["ssh"]["rootKeys"]:
if key == public_key:
raise KeyAlreadyExists()
data["ssh"]["rootKeys"].append(public_key)
class KeyAlreadyExists(Exception):
"""Key already exists"""

View File

@ -9,7 +9,6 @@ import uvicorn
from selfprivacy_api.dependencies import get_api_version
from selfprivacy_api.graphql.schema import schema
from selfprivacy_api.migrations import run_migrations
from selfprivacy_api.restic_controller.tasks import init_restic
from selfprivacy_api.rest import (
system,
@ -49,7 +48,6 @@ async def get_version():
@app.on_event("startup")
async def startup():
run_migrations()
init_restic()
if __name__ == "__main__":

View File

@ -0,0 +1,753 @@
"""
This module contains the controller class for backups.
"""
from datetime import datetime, timedelta
import os
from os import statvfs
from typing import Callable, List, Optional
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.services import (
get_service_by_id,
get_all_services,
)
from selfprivacy_api.services.service import (
Service,
ServiceStatus,
StoppedService,
)
from selfprivacy_api.jobs import Jobs, JobStatus, Job
from selfprivacy_api.graphql.queries.providers import (
BackupProvider as BackupProviderEnum,
)
from selfprivacy_api.graphql.common_types.backup import (
RestoreStrategy,
BackupReason,
AutobackupQuotas,
)
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers import get_provider
from selfprivacy_api.backup.storage import Storage
from selfprivacy_api.backup.jobs import (
get_backup_job,
add_backup_job,
get_restore_job,
add_restore_job,
)
DEFAULT_JSON_PROVIDER = {
"provider": "BACKBLAZE",
"accountId": "",
"accountKey": "",
"bucket": "",
}
BACKUP_PROVIDER_ENVS = {
"kind": "BACKUP_KIND",
"login": "BACKUP_LOGIN",
"key": "BACKUP_KEY",
"location": "BACKUP_LOCATION",
}
AUTOBACKUP_JOB_EXPIRATION_SECONDS = 60 * 60 # one hour
class NotDeadError(AssertionError):
"""
This error is raised when we try to back up a service that is not dead yet.
"""
def __init__(self, service: Service):
self.service_name = service.get_id()
super().__init__()
def __str__(self):
return f"""
Service {self.service_name} should be either stopped or dead from
an error before we back up.
Normally, this error is unreachable because we do try to ensure this.
Apparently, not this time.
"""
class RotationBucket:
"""
Bucket object used for rotation.
Has the following mutable fields:
- the counter, int
- the lambda function which takes datetime and the int and returns the int
- the last, int
"""
def __init__(self, counter: int, last: int, rotation_lambda):
self.counter: int = counter
self.last: int = last
self.rotation_lambda: Callable[[datetime, int], int] = rotation_lambda
def __str__(self) -> str:
return f"Bucket(counter={self.counter}, last={self.last})"
class Backups:
"""A stateless controller class for backups"""
# Providers
@staticmethod
def provider() -> AbstractBackupProvider:
"""
Returns the current backup storage provider.
"""
return Backups._lookup_provider()
@staticmethod
def set_provider(
kind: BackupProviderEnum,
login: str,
key: str,
location: str,
repo_id: str = "",
) -> None:
"""
Sets the new configuration of the backup storage provider.
In case of `BackupProviderEnum.BACKBLAZE`, the `login` is the key ID,
the `key` is the key itself, and the `location` is the bucket name and
the `repo_id` is the bucket ID.
"""
provider: AbstractBackupProvider = Backups._construct_provider(
kind,
login,
key,
location,
repo_id,
)
Storage.store_provider(provider)
@staticmethod
def reset(reset_json=True) -> None:
"""
Deletes all the data about the backup storage provider.
"""
Storage.reset()
if reset_json:
try:
Backups._reset_provider_json()
except FileNotFoundError:
# if there is no userdata file, we do not need to reset it
pass
@staticmethod
def _lookup_provider() -> AbstractBackupProvider:
redis_provider = Backups._load_provider_redis()
if redis_provider is not None:
return redis_provider
try:
json_provider = Backups._load_provider_json()
except FileNotFoundError:
json_provider = None
if json_provider is not None:
Storage.store_provider(json_provider)
return json_provider
none_provider = Backups._construct_provider(
BackupProviderEnum.NONE, login="", key="", location=""
)
Storage.store_provider(none_provider)
return none_provider
@staticmethod
def set_provider_from_envs():
for env in BACKUP_PROVIDER_ENVS.values():
if env not in os.environ.keys():
raise ValueError(
f"Cannot set backup provider from envs, there is no {env} set"
)
kind_str = os.environ[BACKUP_PROVIDER_ENVS["kind"]]
kind_enum = BackupProviderEnum[kind_str]
provider = Backups._construct_provider(
kind=kind_enum,
login=os.environ[BACKUP_PROVIDER_ENVS["login"]],
key=os.environ[BACKUP_PROVIDER_ENVS["key"]],
location=os.environ[BACKUP_PROVIDER_ENVS["location"]],
)
Storage.store_provider(provider)
@staticmethod
def _construct_provider(
kind: BackupProviderEnum,
login: str,
key: str,
location: str,
repo_id: str = "",
) -> AbstractBackupProvider:
provider_class = get_provider(kind)
return provider_class(
login=login,
key=key,
location=location,
repo_id=repo_id,
)
@staticmethod
def _load_provider_redis() -> Optional[AbstractBackupProvider]:
provider_model = Storage.load_provider()
if provider_model is None:
return None
return Backups._construct_provider(
BackupProviderEnum[provider_model.kind],
provider_model.login,
provider_model.key,
provider_model.location,
provider_model.repo_id,
)
@staticmethod
def _load_provider_json() -> Optional[AbstractBackupProvider]:
with ReadUserData() as user_data:
provider_dict = {
"provider": "",
"accountId": "",
"accountKey": "",
"bucket": "",
}
if "backup" not in user_data.keys():
if "backblaze" in user_data.keys():
provider_dict.update(user_data["backblaze"])
provider_dict["provider"] = "BACKBLAZE"
return None
else:
provider_dict.update(user_data["backup"])
if provider_dict == DEFAULT_JSON_PROVIDER:
return None
try:
return Backups._construct_provider(
kind=BackupProviderEnum[provider_dict["provider"]],
login=provider_dict["accountId"],
key=provider_dict["accountKey"],
location=provider_dict["bucket"],
)
except KeyError:
return None
@staticmethod
def _reset_provider_json() -> None:
with WriteUserData() as user_data:
if "backblaze" in user_data.keys():
del user_data["backblaze"]
user_data["backup"] = DEFAULT_JSON_PROVIDER
# Init
@staticmethod
def init_repo() -> None:
"""
Initializes the backup repository. This is required once per repo.
"""
Backups.provider().backupper.init()
Storage.mark_as_init()
@staticmethod
def erase_repo() -> None:
"""
Completely empties the remote
"""
Backups.provider().backupper.erase_repo()
Storage.mark_as_uninitted()
@staticmethod
def is_initted() -> bool:
"""
Returns whether the backup repository is initialized or not.
If it is not initialized, we cannot back up and probably should
call `init_repo` first.
"""
if Storage.has_init_mark():
return True
initted = Backups.provider().backupper.is_initted()
if initted:
Storage.mark_as_init()
return True
return False
# Backup
@staticmethod
def back_up(
service: Service, reason: BackupReason = BackupReason.EXPLICIT
) -> Snapshot:
"""The top-level function to back up a service"""
folders = service.get_folders()
service_name = service.get_id()
job = get_backup_job(service)
if job is None:
job = add_backup_job(service)
Jobs.update(job, status=JobStatus.RUNNING)
try:
service.pre_backup()
snapshot = Backups.provider().backupper.start_backup(
folders,
service_name,
reason=reason,
)
Backups._store_last_snapshot(service_name, snapshot)
if reason == BackupReason.AUTO:
Backups._prune_auto_snaps(service)
service.post_restore()
except Exception as error:
Jobs.update(job, status=JobStatus.ERROR, status_text=str(error))
raise error
Jobs.update(job, status=JobStatus.FINISHED)
if reason in [BackupReason.AUTO, BackupReason.PRE_RESTORE]:
Jobs.set_expiration(job, AUTOBACKUP_JOB_EXPIRATION_SECONDS)
return snapshot
@staticmethod
def _auto_snaps(service):
return [
snap
for snap in Backups.get_snapshots(service)
if snap.reason == BackupReason.AUTO
]
@staticmethod
def _prune_snaps_with_quotas(snapshots: List[Snapshot]) -> List[Snapshot]:
# Function broken out for testability
# Sorting newest first
sorted_snaps = sorted(snapshots, key=lambda s: s.created_at, reverse=True)
quotas: AutobackupQuotas = Backups.autobackup_quotas()
buckets: list[RotationBucket] = [
RotationBucket(
quotas.last, # type: ignore
-1,
lambda _, index: index,
),
RotationBucket(
quotas.daily, # type: ignore
-1,
lambda date, _: date.year * 10000 + date.month * 100 + date.day,
),
RotationBucket(
quotas.weekly, # type: ignore
-1,
lambda date, _: date.year * 100 + date.isocalendar()[1],
),
RotationBucket(
quotas.monthly, # type: ignore
-1,
lambda date, _: date.year * 100 + date.month,
),
RotationBucket(
quotas.yearly, # type: ignore
-1,
lambda date, _: date.year,
),
]
new_snaplist: List[Snapshot] = []
for i, snap in enumerate(sorted_snaps):
keep_snap = False
for bucket in buckets:
if (bucket.counter > 0) or (bucket.counter == -1):
val = bucket.rotation_lambda(snap.created_at, i)
if (val != bucket.last) or (i == len(sorted_snaps) - 1):
bucket.last = val
if bucket.counter > 0:
bucket.counter -= 1
if not keep_snap:
new_snaplist.append(snap)
keep_snap = True
return new_snaplist
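# Illustrative example (not part of the module): with quotas last=1, daily=1,
# weekly=monthly=yearly=0 and auto snapshots, newest first,
# A (today 14:00), B (today 10:00), C (yesterday):
#   A is kept by the "last" bucket and also fills today's daily slot,
#   B is dropped because both counters are already exhausted,
#   C is dropped because 0 means "do not keep" for every remaining timeframe.
# _prune_auto_snaps would therefore forget B and C.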
@staticmethod
def _prune_auto_snaps(service) -> None:
# Not very testable by itself, so most testing is done on Backups._prune_snaps_with_quotas
# We can still test total limits and, say, daily limits
auto_snaps = Backups._auto_snaps(service)
new_snaplist = Backups._prune_snaps_with_quotas(auto_snaps)
# TODO: Can be optimized, since restic can forget an array of snapshots in one op,
# but most of the time this will be only one snap to forget.
for snap in auto_snaps:
if snap not in new_snaplist:
Backups.forget_snapshot(snap)
@staticmethod
def _standardize_quotas(i: int) -> int:
if i <= -1:
i = -1
return i
@staticmethod
def autobackup_quotas() -> AutobackupQuotas:
"""0 means do not keep, -1 means unlimited"""
return Storage.autobackup_quotas()
@staticmethod
def set_autobackup_quotas(quotas: AutobackupQuotas) -> None:
"""0 means do not keep, -1 means unlimited"""
Storage.set_autobackup_quotas(
AutobackupQuotas(
last=Backups._standardize_quotas(quotas.last), # type: ignore
daily=Backups._standardize_quotas(quotas.daily), # type: ignore
weekly=Backups._standardize_quotas(quotas.weekly), # type: ignore
monthly=Backups._standardize_quotas(quotas.monthly), # type: ignore
yearly=Backups._standardize_quotas(quotas.yearly), # type: ignore
)
)
for service in get_all_services():
Backups._prune_auto_snaps(service)
# Restoring
@staticmethod
def _ensure_queued_restore_job(service, snapshot) -> Job:
job = get_restore_job(service)
if job is None:
job = add_restore_job(snapshot)
Jobs.update(job, status=JobStatus.CREATED)
return job
@staticmethod
def _inplace_restore(
service: Service,
snapshot: Snapshot,
job: Job,
) -> None:
Jobs.update(
job, status=JobStatus.CREATED, status_text="Waiting for pre-restore backup"
)
failsafe_snapshot = Backups.back_up(service, BackupReason.PRE_RESTORE)
Jobs.update(
job, status=JobStatus.RUNNING, status_text=f"Restoring from {snapshot.id}"
)
try:
Backups._restore_service_from_snapshot(
service,
snapshot.id,
verify=False,
)
except Exception as error:
Jobs.update(
job,
status=JobStatus.ERROR,
status_text=f"Restore failed with {str(error)}, reverting to {failsafe_snapshot.id}",
)
Backups._restore_service_from_snapshot(
service, failsafe_snapshot.id, verify=False
)
Jobs.update(
job,
status=JobStatus.ERROR,
status_text=f"Restore failed with {str(error)}, reverted to {failsafe_snapshot.id}",
)
raise error
@staticmethod
def restore_snapshot(
snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
) -> None:
"""Restores a snapshot to its original service using the given strategy"""
service = get_service_by_id(snapshot.service_name)
if service is None:
raise ValueError(
f"snapshot has a nonexistent service: {snapshot.service_name}"
)
job = Backups._ensure_queued_restore_job(service, snapshot)
try:
Backups._assert_restorable(snapshot)
Jobs.update(
job, status=JobStatus.RUNNING, status_text="Stopping the service"
)
with StoppedService(service):
Backups.assert_dead(service)
if strategy == RestoreStrategy.INPLACE:
Backups._inplace_restore(service, snapshot, job)
else: # verify_before_download is our default
Jobs.update(
job,
status=JobStatus.RUNNING,
status_text=f"Restoring from {snapshot.id}",
)
Backups._restore_service_from_snapshot(
service, snapshot.id, verify=True
)
service.post_restore()
Jobs.update(
job,
status=JobStatus.RUNNING,
progress=90,
status_text="Restarting the service",
)
except Exception as error:
Jobs.update(job, status=JobStatus.ERROR, status_text=str(error))
raise error
Jobs.update(job, status=JobStatus.FINISHED)
@staticmethod
def _assert_restorable(
snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
) -> None:
service = get_service_by_id(snapshot.service_name)
if service is None:
raise ValueError(
f"snapshot has a nonexistent service: {snapshot.service_name}"
)
restored_snap_size = Backups.snapshot_restored_size(snapshot.id)
if strategy == RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE:
needed_space = restored_snap_size
elif strategy == RestoreStrategy.INPLACE:
needed_space = restored_snap_size - service.get_storage_usage()
else:
raise NotImplementedError(
"""
We do not know if there is enough space for restoration because
there is some novel restore strategy used!
This is a developer's fault, open an issue please
"""
)
available_space = Backups.space_usable_for_service(service)
if needed_space > available_space:
raise ValueError(
f"we only have {available_space} bytes "
f"but snapshot needs {needed_space}"
)
@staticmethod
def _restore_service_from_snapshot(
service: Service,
snapshot_id: str,
verify=True,
) -> None:
folders = service.get_folders()
Backups.provider().backupper.restore_from_backup(
snapshot_id,
folders,
verify=verify,
)
# Snapshots
@staticmethod
def get_snapshots(service: Service) -> List[Snapshot]:
"""Returns all snapshots for a given service"""
snapshots = Backups.get_all_snapshots()
service_id = service.get_id()
return list(
filter(
lambda snap: snap.service_name == service_id,
snapshots,
)
)
@staticmethod
def get_all_snapshots() -> List[Snapshot]:
"""Returns all snapshots"""
cached_snapshots = Storage.get_cached_snapshots()
if cached_snapshots:
return cached_snapshots
# TODO: the oldest snapshots will get expired faster than the new ones.
# How to detect that the end is missing?
Backups.force_snapshot_cache_reload()
return Storage.get_cached_snapshots()
@staticmethod
def get_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
"""Returns a backup snapshot by its id"""
snap = Storage.get_cached_snapshot_by_id(snapshot_id)
if snap is not None:
return snap
# Possibly our cache entry got invalidated, let's try one more time
Backups.force_snapshot_cache_reload()
snap = Storage.get_cached_snapshot_by_id(snapshot_id)
return snap
@staticmethod
def forget_snapshot(snapshot: Snapshot) -> None:
"""Deletes a snapshot from the repo and from cache"""
Backups.provider().backupper.forget_snapshot(snapshot.id)
Storage.delete_cached_snapshot(snapshot)
@staticmethod
def forget_all_snapshots():
"""deliberately erase all snapshots we made"""
# there is no dedicated optimized command for this,
# but maybe we can have a multi-erase
for snapshot in Backups.get_all_snapshots():
Backups.forget_snapshot(snapshot)
@staticmethod
def force_snapshot_cache_reload() -> None:
"""
Forces a reload of the snapshot cache.
This may be an expensive operation, so use it wisely.
User pays for the API calls.
"""
upstream_snapshots = Backups.provider().backupper.get_snapshots()
Storage.invalidate_snapshot_storage()
for snapshot in upstream_snapshots:
Storage.cache_snapshot(snapshot)
@staticmethod
def snapshot_restored_size(snapshot_id: str) -> int:
"""Returns the size of the snapshot"""
return Backups.provider().backupper.restored_size(
snapshot_id,
)
@staticmethod
def _store_last_snapshot(service_id: str, snapshot: Snapshot) -> None:
"""What do we do with a snapshot that is just made?"""
# non-expiring timestamp of the last
Storage.store_last_timestamp(service_id, snapshot)
# expiring cache entry
Storage.cache_snapshot(snapshot)
# Autobackup
@staticmethod
def autobackup_period_minutes() -> Optional[int]:
"""None means autobackup is disabled"""
return Storage.autobackup_period_minutes()
@staticmethod
def set_autobackup_period_minutes(minutes: int) -> None:
"""
0 and negative numbers are equivalent to disable.
Setting to a positive number may result in a backup very soon
if some services are not backed up.
"""
if minutes <= 0:
Backups.disable_all_autobackup()
return
Storage.store_autobackup_period_minutes(minutes)
@staticmethod
def disable_all_autobackup() -> None:
"""
Disables all automatic backing up,
but does not change per-service settings
"""
Storage.delete_backup_period()
@staticmethod
def is_time_to_backup(time: datetime) -> bool:
"""
Intended as a time validator for huey cron scheduler
of automatic backups
"""
return Backups.services_to_back_up(time) != []
@staticmethod
def services_to_back_up(time: datetime) -> List[Service]:
"""Returns a list of services that should be backed up at a given time"""
return [
service
for service in get_all_services()
if Backups.is_time_to_backup_service(service, time)
]
@staticmethod
def get_last_backed_up(service: Service) -> Optional[datetime]:
"""Get a timezone-aware time of the last backup of a service"""
return Storage.get_last_backup_time(service.get_id())
@staticmethod
def is_time_to_backup_service(service: Service, time: datetime):
"""Returns True if it is time to back up a service"""
period = Backups.autobackup_period_minutes()
service_id = service.get_id()
if not service.can_be_backed_up():
return False
if period is None:
return False
last_backup = Storage.get_last_backup_time(service_id)
if last_backup is None:
# queue a backup immediately if there are no previous backups
return True
if time > last_backup + timedelta(minutes=period):
return True
return False
# Helpers
@staticmethod
def space_usable_for_service(service: Service) -> int:
"""
Returns the amount of space available on the volume the given
service is located on.
"""
folders = service.get_folders()
if folders == []:
raise ValueError("unallocated service", service.get_id())
# We assume all folders of one service live at the same volume
fs_info = statvfs(folders[0])
usable_bytes = fs_info.f_frsize * fs_info.f_bavail
return usable_bytes
@staticmethod
def set_localfile_repo(file_path: str):
"""Used by tests to set a local folder as a backup repo"""
# pylint: disable-next=invalid-name
ProviderClass = get_provider(BackupProviderEnum.FILE)
provider = ProviderClass(
login="",
key="",
location=file_path,
repo_id="",
)
Storage.store_provider(provider)
@staticmethod
def assert_dead(service: Service):
"""
Checks if a service is dead and can be safely restored from a snapshot.
"""
if service.get_status() not in [
ServiceStatus.INACTIVE,
ServiceStatus.FAILED,
]:
raise NotDeadError(service)
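Taken together, the controller is used roughly as below. This is a hedged sketch of the happy path only (provider from environment variables, one explicit backup, one verified restore); "nextcloud" is a hypothetical service id and the BACKUP_* variables are assumed to be exported.

from selfprivacy_api.backup import Backups
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.graphql.common_types.backup import BackupReason

Backups.set_provider_from_envs()          # reads BACKUP_KIND/LOGIN/KEY/LOCATION
if not Backups.is_initted():
    Backups.init_repo()                   # required once per repository

service = get_service_by_id("nextcloud")  # hypothetical service id
snapshot = Backups.back_up(service, BackupReason.EXPLICIT)

# The default strategy downloads and verifies before overwriting the live folders.
Backups.restore_snapshot(snapshot)

Autobackup retention is configured separately through Backups.set_autobackup_quotas(), where 0 means keep none for a timeframe and -1 means unlimited.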

View File

@ -0,0 +1,68 @@
from abc import ABC, abstractmethod
from typing import List
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.graphql.common_types.backup import BackupReason
class AbstractBackupper(ABC):
"""Abstract class for backuppers"""
# flake8: noqa: B027
def __init__(self) -> None:
pass
@abstractmethod
def is_initted(self) -> bool:
"""Returns true if the repository is initted"""
raise NotImplementedError
@abstractmethod
def set_creds(self, account: str, key: str, repo: str) -> None:
"""Set the credentials for the backupper"""
raise NotImplementedError
@abstractmethod
def start_backup(
self,
folders: List[str],
service_name: str,
reason: BackupReason = BackupReason.EXPLICIT,
) -> Snapshot:
"""Start a backup of the given folders"""
raise NotImplementedError
@abstractmethod
def get_snapshots(self) -> List[Snapshot]:
"""Get all snapshots from the repo"""
raise NotImplementedError
@abstractmethod
def init(self) -> None:
"""Initialize the repository"""
raise NotImplementedError
@abstractmethod
def erase_repo(self) -> None:
"""Completely empties the remote"""
raise NotImplementedError
@abstractmethod
def restore_from_backup(
self,
snapshot_id: str,
folders: List[str],
verify=True,
) -> None:
"""Restore a target folder using a snapshot"""
raise NotImplementedError
@abstractmethod
def restored_size(self, snapshot_id: str) -> int:
"""Get the size of the restored snapshot"""
raise NotImplementedError
@abstractmethod
def forget_snapshot(self, snapshot_id) -> None:
"""Forget a snapshot"""
raise NotImplementedError

View File

@ -0,0 +1,42 @@
from typing import List
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.graphql.common_types.backup import BackupReason
class NoneBackupper(AbstractBackupper):
"""A backupper that does nothing"""
def is_initted(self, repo_name: str = "") -> bool:
return False
def set_creds(self, account: str, key: str, repo: str):
pass
def start_backup(
self, folders: List[str], tag: str, reason: BackupReason = BackupReason.EXPLICIT
):
raise NotImplementedError
def get_snapshots(self) -> List[Snapshot]:
"""Get all snapshots from the repo"""
return []
def init(self):
raise NotImplementedError
def erase_repo(self) -> None:
"""Completely empties the remote"""
# this one is already empty
pass
def restore_from_backup(self, snapshot_id: str, folders: List[str], verify=True):
"""Restore a target folder using a snapshot"""
raise NotImplementedError
def restored_size(self, snapshot_id: str) -> int:
raise NotImplementedError
def forget_snapshot(self, snapshot_id):
raise NotImplementedError

View File

@ -0,0 +1,513 @@
from __future__ import annotations
import subprocess
import json
import datetime
import tempfile
from typing import List, Optional, TypeVar, Callable
from collections.abc import Iterable
from json.decoder import JSONDecodeError
from os.path import exists, join
from os import mkdir
from shutil import rmtree
from selfprivacy_api.graphql.common_types.backup import BackupReason
from selfprivacy_api.backup.util import output_yielder, sync
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.jobs import get_backup_job
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.jobs import Jobs, JobStatus
from selfprivacy_api.backup.local_secret import LocalBackupSecret
SHORT_ID_LEN = 8
T = TypeVar("T", bound=Callable)
def unlocked_repo(func: T) -> T:
"""unlock repo and retry if it appears to be locked"""
def inner(self: ResticBackupper, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as error:
if "unable to create lock" in str(error):
self.unlock()
return func(self, *args, **kwargs)
else:
raise error
# Above, we manually guarantee that the type returned is compatible.
return inner # type: ignore
class ResticBackupper(AbstractBackupper):
def __init__(self, login_flag: str, key_flag: str, storage_type: str) -> None:
self.login_flag = login_flag
self.key_flag = key_flag
self.storage_type = storage_type
self.account = ""
self.key = ""
self.repo = ""
super().__init__()
def set_creds(self, account: str, key: str, repo: str) -> None:
self.account = account
self.key = key
self.repo = repo
def restic_repo(self) -> str:
# https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#other-services-via-rclone
# https://forum.rclone.org/t/can-rclone-be-run-solely-with-command-line-options-no-config-no-env-vars/6314/5
return f"rclone:{self.rclone_repo()}"
def rclone_repo(self) -> str:
return f"{self.storage_type}{self.repo}"
def rclone_args(self):
return "rclone.args=serve restic --stdio " + " ".join(
self.backend_rclone_args()
)
def backend_rclone_args(self) -> list[str]:
args = []
if self.account != "":
acc_args = [self.login_flag, self.account]
args.extend(acc_args)
if self.key != "":
key_args = [self.key_flag, self.key]
args.extend(key_args)
return args
def _password_command(self):
return f"echo {LocalBackupSecret.get()}"
def restic_command(self, *args, tags: Optional[List[str]] = None) -> List[str]:
if tags is None:
tags = []
command = [
"restic",
"-o",
self.rclone_args(),
"-r",
self.restic_repo(),
"--password-command",
self._password_command(),
]
if tags != []:
for tag in tags:
command.extend(
[
"--tag",
tag,
]
)
if args:
command.extend(ResticBackupper.__flatten_list(args))
return command
def erase_repo(self) -> None:
"""Fully erases repo on remote, can be reinitted again"""
command = [
"rclone",
"purge",
self.rclone_repo(),
]
backend_args = self.backend_rclone_args()
if backend_args:
command.extend(backend_args)
with subprocess.Popen(command, stdout=subprocess.PIPE, shell=False) as handle:
output = handle.communicate()[0].decode("utf-8")
if handle.returncode != 0:
raise ValueError(
"purge exited with errorcode",
handle.returncode,
":",
output,
)
@staticmethod
def __flatten_list(list_to_flatten):
"""string-aware list flattener"""
result = []
for item in list_to_flatten:
if isinstance(item, Iterable) and not isinstance(item, str):
result.extend(ResticBackupper.__flatten_list(item))
continue
result.append(item)
return result
@unlocked_repo
def start_backup(
self,
folders: List[str],
service_name: str,
reason: BackupReason = BackupReason.EXPLICIT,
) -> Snapshot:
"""
Start backup with restic
"""
# but maybe it is ok to accept a union
# of a string and an array of strings
assert not isinstance(folders, str)
tags = [service_name, reason.value]
backup_command = self.restic_command(
"backup",
"--json",
folders,
tags=tags,
)
service = get_service_by_id(service_name)
if service is None:
raise ValueError("No service with id ", service_name)
job = get_backup_job(service)
messages = []
output = []
try:
for raw_message in output_yielder(backup_command):
output.append(raw_message)
message = self.parse_message(raw_message, job)
messages.append(message)
id = ResticBackupper._snapshot_id_from_backup_messages(messages)
return Snapshot(
created_at=datetime.datetime.now(datetime.timezone.utc),
id=id,
service_name=service_name,
reason=reason,
)
except ValueError as error:
raise ValueError(
"Could not create a snapshot: ",
str(error),
output,
"parsed messages:",
messages,
) from error
@staticmethod
def _snapshot_id_from_backup_messages(messages) -> str:
for message in messages:
if message["message_type"] == "summary":
# There is a discrepancy between versions of restic/rclone
# Some report short_id in this field and some full
return message["snapshot_id"][0:SHORT_ID_LEN]
raise ValueError("no summary message in restic json output")
def parse_message(self, raw_message_line: str, job=None) -> dict:
message = ResticBackupper.parse_json_output(raw_message_line)
if not isinstance(message, dict):
raise ValueError("we have too many messages on one line?")
if message["message_type"] == "status":
if job is not None: # only update status if we run under some job
Jobs.update(
job,
JobStatus.RUNNING,
progress=int(message["percent_done"] * 100),
)
return message
def init(self) -> None:
init_command = self.restic_command(
"init",
)
with subprocess.Popen(
init_command,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as process_handle:
output = process_handle.communicate()[0].decode("utf-8")
if "created restic repository" not in output:
raise ValueError("cannot init a repo: " + output)
@unlocked_repo
def is_initted(self) -> bool:
command = self.restic_command(
"check",
)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
shell=False,
stderr=subprocess.STDOUT,
) as handle:
output = handle.communicate()[0].decode("utf-8")
if handle.returncode != 0:
if "unable to create lock" in output:
raise ValueError("Stale lock detected: ", output)
return False
return True
def unlock(self) -> None:
"""Remove stale locks."""
command = self.restic_command(
"unlock",
)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
shell=False,
stderr=subprocess.STDOUT,
) as handle:
# communicate() forces the process to complete so that returncode gets defined
output = handle.communicate()[0].decode("utf-8")
if handle.returncode != 0:
raise ValueError("cannot unlock the backup repository: ", output)
def lock(self) -> None:
"""
Introduce a stale lock.
Mainly for testing purposes.
Double lock is supposed to fail
"""
command = self.restic_command(
"check",
)
# using temporary cache in /run/user/1000/restic-check-cache-817079729
# repository 9639c714 opened (repository version 2) successfully, password is correct
# created new cache in /run/user/1000/restic-check-cache-817079729
# create exclusive lock for repository
# load indexes
# check all packs
# check snapshots, trees and blobs
# [0:00] 100.00% 1 / 1 snapshots
# no errors were found
try:
for line in output_yielder(command):
if "indexes" in line:
break
if "unable" in line:
raise ValueError(line)
except Exception as error:
raise ValueError("could not lock repository") from error
@unlocked_repo
def restored_size(self, snapshot_id: str) -> int:
"""
Size of a snapshot
"""
command = self.restic_command(
"stats",
snapshot_id,
"--json",
)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False,
) as handle:
output = handle.communicate()[0].decode("utf-8")
try:
parsed_output = ResticBackupper.parse_json_output(output)
return parsed_output["total_size"]
except ValueError as error:
raise ValueError("cannot restore a snapshot: " + output) from error
@unlocked_repo
def restore_from_backup(
self,
snapshot_id,
folders: List[str],
verify=True,
) -> None:
"""
Restore from backup with restic
"""
if folders is None or folders == []:
raise ValueError("cannot restore without knowing where to!")
with tempfile.TemporaryDirectory() as temp_dir:
if verify:
self._raw_verified_restore(snapshot_id, target=temp_dir)
snapshot_root = temp_dir
for folder in folders:
src = join(snapshot_root, folder.strip("/"))
if not exists(src):
raise ValueError(
f"No such path: {src}. We tried to find {folder}"
)
dst = folder
sync(src, dst)
else: # attempting inplace restore
for folder in folders:
rmtree(folder)
mkdir(folder)
self._raw_verified_restore(snapshot_id, target="/")
return
def _raw_verified_restore(self, snapshot_id, target="/"):
"""barebones restic restore"""
restore_command = self.restic_command(
"restore", snapshot_id, "--target", target, "--verify"
)
with subprocess.Popen(
restore_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False,
) as handle:
# for some reason restore does not support
# nice reporting of progress via json
output = handle.communicate()[0].decode("utf-8")
if "restoring" not in output:
raise ValueError("cannot restore a snapshot: " + output)
assert (
handle.returncode is not None
) # none should be impossible after communicate
if handle.returncode != 0:
raise ValueError(
"restore exited with errorcode",
handle.returncode,
":",
output,
)
@unlocked_repo
def forget_snapshot(self, snapshot_id) -> None:
"""
Either removes snapshot or marks it for deletion later,
depending on server settings
"""
forget_command = self.restic_command(
"forget",
snapshot_id,
# TODO: prune should be done in a separate process
"--prune",
)
with subprocess.Popen(
forget_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
) as handle:
# for some reason forget does not support
# nice reporting of progress via json
output, err = [
string.decode(
"utf-8",
)
for string in handle.communicate()
]
if "no matching ID found" in err:
raise ValueError(
"trying to delete, but no such snapshot: ", snapshot_id
)
assert (
handle.returncode is not None
) # none should be impossible after communicate
if handle.returncode != 0:
raise ValueError(
"forget exited with errorcode", handle.returncode, ":", output, err
)
def _load_snapshots(self) -> object:
"""
Load list of snapshots from repository
raises ValueError if repo does not exist
"""
listing_command = self.restic_command(
"snapshots",
"--json",
)
with subprocess.Popen(
listing_command,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as backup_listing_process_descriptor:
output = backup_listing_process_descriptor.communicate()[0].decode("utf-8")
if "Is there a repository at the following location?" in output:
raise ValueError("No repository! : " + output)
try:
return ResticBackupper.parse_json_output(output)
except ValueError as error:
raise ValueError("Cannot load snapshots: ", output) from error
@unlocked_repo
def get_snapshots(self) -> List[Snapshot]:
"""Get all snapshots from the repo"""
snapshots = []
for restic_snapshot in self._load_snapshots():
# Compatibility with previous snaps:
if len(restic_snapshot["tags"]) == 1:
reason = BackupReason.EXPLICIT
else:
reason = restic_snapshot["tags"][1]
snapshot = Snapshot(
id=restic_snapshot["short_id"],
created_at=restic_snapshot["time"],
service_name=restic_snapshot["tags"][0],
reason=reason,
)
snapshots.append(snapshot)
return snapshots
@staticmethod
def parse_json_output(output: str) -> object:
starting_index = ResticBackupper.json_start(output)
if starting_index == -1:
raise ValueError("There is no json in the restic output: " + output)
truncated_output = output[starting_index:]
json_messages = truncated_output.splitlines()
if len(json_messages) == 1:
try:
return json.loads(truncated_output)
except JSONDecodeError as error:
raise ValueError(
"There is no json in the restic output : " + output
) from error
result_array = []
for message in json_messages:
result_array.append(json.loads(message))
return result_array
@staticmethod
def json_start(output: str) -> int:
indices = [
output.find("["),
output.find("{"),
]
indices = [x for x in indices if x != -1]
if indices == []:
return -1
return min(indices)
@staticmethod
def has_json(output: str) -> bool:
if ResticBackupper.json_start(output) == -1:
return False
return True
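For orientation, every restic invocation above is composed by restic_command() from the rclone backend flags and the local secret. A sketch of roughly what a backup call expands to for a Backblaze-style backupper; the account, key and bucket values are placeholders, not real credentials.

backupper = ResticBackupper("--b2-account", "--b2-key", ":b2:")
backupper.set_creds("<key id>", "<application key>", "<bucket>")

cmd = backupper.restic_command(
    "backup", "--json", ["/var/lib/example"], tags=["example", "EXPLICIT"]
)
# cmd is roughly:
# ["restic",
#  "-o", "rclone.args=serve restic --stdio --b2-account <key id> --b2-key <application key>",
#  "-r", "rclone::b2:<bucket>",
#  "--password-command", "echo <local secret>",
#  "--tag", "example", "--tag", "EXPLICIT",
#  "backup", "--json", "/var/lib/example"]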

View File

@ -0,0 +1,88 @@
from typing import Optional, List
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.jobs import Jobs, Job, JobStatus
from selfprivacy_api.services.service import Service
from selfprivacy_api.services import get_service_by_id
def job_type_prefix(service: Service) -> str:
return f"services.{service.get_id()}"
def backup_job_type(service: Service) -> str:
return f"{job_type_prefix(service)}.backup"
def restore_job_type(service: Service) -> str:
return f"{job_type_prefix(service)}.restore"
def get_jobs_by_service(service: Service) -> List[Job]:
result = []
for job in Jobs.get_jobs():
if job.type_id.startswith(job_type_prefix(service)) and job.status in [
JobStatus.CREATED,
JobStatus.RUNNING,
]:
result.append(job)
return result
def is_something_running_for(service: Service) -> bool:
running_jobs = [
job for job in get_jobs_by_service(service) if job.status == JobStatus.RUNNING
]
return len(running_jobs) != 0
def add_backup_job(service: Service) -> Job:
if is_something_running_for(service):
message = (
f"Cannot start a backup of {service.get_id()}, another operation is running: "
+ get_jobs_by_service(service)[0].type_id
)
raise ValueError(message)
display_name = service.get_display_name()
job = Jobs.add(
type_id=backup_job_type(service),
name=f"Backup {display_name}",
description=f"Backing up {display_name}",
)
return job
def add_restore_job(snapshot: Snapshot) -> Job:
service = get_service_by_id(snapshot.service_name)
if service is None:
raise ValueError(f"no such service: {snapshot.service_name}")
if is_something_running_for(service):
message = (
f"Cannot start a restore of {service.get_id()}, another operation is running: "
+ get_jobs_by_service(service)[0].type_id
)
raise ValueError(message)
display_name = service.get_display_name()
job = Jobs.add(
type_id=restore_job_type(service),
name=f"Restore {display_name}",
description=f"restoring {display_name} from {snapshot.id}",
)
return job
def get_job_by_type(type_id: str) -> Optional[Job]:
for job in Jobs.get_jobs():
if job.type_id == type_id and job.status in [
JobStatus.CREATED,
JobStatus.RUNNING,
]:
return job
def get_backup_job(service: Service) -> Optional[Job]:
return get_job_by_type(backup_job_type(service))
def get_restore_job(service: Service) -> Optional[Job]:
return get_job_by_type(restore_job_type(service))
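The helpers above key every job to a per-service type id, so only one backup or restore can be active for a service at a time. A sketch of the naming scheme, with "example" as a hypothetical service id:

# For a service whose get_id() returns "example":
#   job_type_prefix(service)  -> "services.example"
#   backup_job_type(service)  -> "services.example.backup"
#   restore_job_type(service) -> "services.example.restore"
# add_backup_job / add_restore_job raise ValueError while a job with
# that prefix is still RUNNING (see is_something_running_for).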

View File

@ -0,0 +1,45 @@
"""Handling of local secret used for encrypted backups.
Separated out for circular dependency reasons
"""
from __future__ import annotations
import secrets
from selfprivacy_api.utils.redis_pool import RedisPool
REDIS_KEY = "backup:local_secret"
redis = RedisPool().get_connection()
class LocalBackupSecret:
@staticmethod
def get() -> str:
"""A secret string which backblaze/other clouds do not know.
Serves as encryption key.
"""
if not LocalBackupSecret.exists():
LocalBackupSecret.reset()
return redis.get(REDIS_KEY) # type: ignore
@staticmethod
def set(secret: str):
redis.set(REDIS_KEY, secret)
@staticmethod
def reset():
new_secret = LocalBackupSecret._generate()
LocalBackupSecret.set(new_secret)
@staticmethod
def _full_reset():
redis.delete(REDIS_KEY)
@staticmethod
def exists() -> bool:
return redis.exists(REDIS_KEY) == 1
@staticmethod
def _generate() -> str:
return secrets.token_urlsafe(256)

View File

@ -0,0 +1,29 @@
from typing import Type
from selfprivacy_api.graphql.queries.providers import (
BackupProvider as BackupProviderEnum,
)
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers.backblaze import Backblaze
from selfprivacy_api.backup.providers.memory import InMemoryBackup
from selfprivacy_api.backup.providers.local_file import LocalFileBackup
from selfprivacy_api.backup.providers.none import NoBackups
PROVIDER_MAPPING: dict[BackupProviderEnum, Type[AbstractBackupProvider]] = {
BackupProviderEnum.BACKBLAZE: Backblaze,
BackupProviderEnum.MEMORY: InMemoryBackup,
BackupProviderEnum.FILE: LocalFileBackup,
BackupProviderEnum.NONE: NoBackups,
}
def get_provider(
provider_type: BackupProviderEnum,
) -> Type[AbstractBackupProvider]:
return PROVIDER_MAPPING[provider_type]
def get_kind(provider: AbstractBackupProvider) -> str:
"""Get the kind of the provider in the form of a string"""
return provider.name.value
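A short illustration of the mapping above, resolving the enum member to its provider class and back to the string kind stored in redis; the credentials are placeholders.

from selfprivacy_api.backup.providers import get_provider, get_kind
from selfprivacy_api.graphql.queries.providers import BackupProvider

ProviderClass = get_provider(BackupProvider.BACKBLAZE)          # -> Backblaze
provider = ProviderClass(login="<key id>", key="<key>", location="<bucket>")
kind = get_kind(provider)   # the enum's value, e.g. "BACKBLAZE" (assumed)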

View File

@ -0,0 +1,11 @@
from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
BackupProvider as BackupProviderEnum,
)
class Backblaze(AbstractBackupProvider):
backupper = ResticBackupper("--b2-account", "--b2-key", ":b2:")
name = BackupProviderEnum.BACKBLAZE

View File

@ -0,0 +1,11 @@
from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
BackupProvider as BackupProviderEnum,
)
class LocalFileBackup(AbstractBackupProvider):
backupper = ResticBackupper("", "", ":local:")
name = BackupProviderEnum.FILE

View File

@ -0,0 +1,11 @@
from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
BackupProvider as BackupProviderEnum,
)
class InMemoryBackup(AbstractBackupProvider):
backupper = ResticBackupper("", "", ":memory:")
name = BackupProviderEnum.MEMORY

View File

@ -0,0 +1,11 @@
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.none_backupper import NoneBackupper
from selfprivacy_api.graphql.queries.providers import (
BackupProvider as BackupProviderEnum,
)
class NoBackups(AbstractBackupProvider):
backupper = NoneBackupper()
name = BackupProviderEnum.NONE

View File

@ -0,0 +1,25 @@
"""
An abstract class for BackBlaze, S3 etc.
It assumes that while some providers are supported via restic/rclone, others
may require different backends
"""
from abc import ABC, abstractmethod
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.graphql.queries.providers import (
BackupProvider as BackupProviderEnum,
)
class AbstractBackupProvider(ABC):
backupper: AbstractBackupper
name: BackupProviderEnum
def __init__(self, login="", key="", location="", repo_id=""):
self.backupper.set_creds(login, key, location)
self.login = login
self.key = key
self.location = location
# We do not need to do anything with this one
# Just remember in case the app forgets
self.repo_id = repo_id

View File

@ -0,0 +1,199 @@
"""
Module for storing backup related data in redis.
"""
from typing import List, Optional
from datetime import datetime
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.models.backup.provider import BackupProviderModel
from selfprivacy_api.graphql.common_types.backup import (
AutobackupQuotas,
_AutobackupQuotas,
)
from selfprivacy_api.utils.redis_pool import RedisPool
from selfprivacy_api.utils.redis_model_storage import (
store_model_as_hash,
hash_as_model,
)
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers import get_kind
REDIS_SNAPSHOTS_PREFIX = "backups:snapshots:"
REDIS_LAST_BACKUP_PREFIX = "backups:last-backed-up:"
REDIS_INITTED_CACHE = "backups:repo_initted"
REDIS_PROVIDER_KEY = "backups:provider"
REDIS_AUTOBACKUP_PERIOD_KEY = "backups:autobackup_period"
REDIS_AUTOBACKUP_QUOTAS_KEY = "backups:autobackup_quotas_key"
redis = RedisPool().get_connection()
class Storage:
"""Static class for storing backup related data in redis"""
@staticmethod
def reset() -> None:
"""Deletes all backup related data from redis"""
redis.delete(REDIS_PROVIDER_KEY)
redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)
redis.delete(REDIS_INITTED_CACHE)
redis.delete(REDIS_AUTOBACKUP_QUOTAS_KEY)
prefixes_to_clean = [
REDIS_SNAPSHOTS_PREFIX,
REDIS_LAST_BACKUP_PREFIX,
]
for prefix in prefixes_to_clean:
for key in redis.keys(prefix + "*"):
redis.delete(key)
@staticmethod
def invalidate_snapshot_storage() -> None:
"""Deletes all cached snapshots from redis"""
for key in redis.keys(REDIS_SNAPSHOTS_PREFIX + "*"):
redis.delete(key)
@staticmethod
def __last_backup_key(service_id: str) -> str:
return REDIS_LAST_BACKUP_PREFIX + service_id
@staticmethod
def __snapshot_key(snapshot: Snapshot) -> str:
return REDIS_SNAPSHOTS_PREFIX + snapshot.id
@staticmethod
def get_last_backup_time(service_id: str) -> Optional[datetime]:
"""Returns last backup time for a service or None if it was never backed up"""
key = Storage.__last_backup_key(service_id)
if not redis.exists(key):
return None
snapshot = hash_as_model(redis, key, Snapshot)
if not snapshot:
return None
return snapshot.created_at
@staticmethod
def store_last_timestamp(service_id: str, snapshot: Snapshot) -> None:
"""Stores last backup time for a service"""
store_model_as_hash(
redis,
Storage.__last_backup_key(service_id),
snapshot,
)
@staticmethod
def cache_snapshot(snapshot: Snapshot) -> None:
"""Stores snapshot metadata in redis for caching purposes"""
snapshot_key = Storage.__snapshot_key(snapshot)
store_model_as_hash(redis, snapshot_key, snapshot)
@staticmethod
def delete_cached_snapshot(snapshot: Snapshot) -> None:
"""Deletes snapshot metadata from redis"""
snapshot_key = Storage.__snapshot_key(snapshot)
redis.delete(snapshot_key)
@staticmethod
def get_cached_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
"""Returns cached snapshot by id or None if it doesn't exist"""
key = REDIS_SNAPSHOTS_PREFIX + snapshot_id
if not redis.exists(key):
return None
return hash_as_model(redis, key, Snapshot)
@staticmethod
def get_cached_snapshots() -> List[Snapshot]:
"""Returns all cached snapshots stored in redis"""
keys: list[str] = redis.keys(REDIS_SNAPSHOTS_PREFIX + "*") # type: ignore
result: list[Snapshot] = []
for key in keys:
snapshot = hash_as_model(redis, key, Snapshot)
if snapshot:
result.append(snapshot)
return result
@staticmethod
def autobackup_period_minutes() -> Optional[int]:
"""None means autobackup is disabled"""
if not redis.exists(REDIS_AUTOBACKUP_PERIOD_KEY):
return None
return int(redis.get(REDIS_AUTOBACKUP_PERIOD_KEY)) # type: ignore
@staticmethod
def store_autobackup_period_minutes(minutes: int) -> None:
"""Set the new autobackup period in minutes"""
redis.set(REDIS_AUTOBACKUP_PERIOD_KEY, minutes)
@staticmethod
def delete_backup_period() -> None:
"""Set the autobackup period to none, effectively disabling autobackup"""
redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)
@staticmethod
def store_provider(provider: AbstractBackupProvider) -> None:
"""Stores backup stroage provider auth data in redis"""
store_model_as_hash(
redis,
REDIS_PROVIDER_KEY,
BackupProviderModel(
kind=get_kind(provider),
login=provider.login,
key=provider.key,
location=provider.location,
repo_id=provider.repo_id,
),
)
@staticmethod
def load_provider() -> Optional[BackupProviderModel]:
"""Loads backup storage provider auth data from redis"""
provider_model = hash_as_model(
redis,
REDIS_PROVIDER_KEY,
BackupProviderModel,
)
return provider_model
@staticmethod
def has_init_mark() -> bool:
"""Returns True if the repository was initialized"""
if redis.exists(REDIS_INITTED_CACHE):
return True
return False
@staticmethod
def mark_as_init():
"""Marks the repository as initialized"""
redis.set(REDIS_INITTED_CACHE, 1)
@staticmethod
def mark_as_uninitted():
"""Marks the repository as initialized"""
redis.delete(REDIS_INITTED_CACHE)
@staticmethod
def set_autobackup_quotas(quotas: AutobackupQuotas) -> None:
store_model_as_hash(redis, REDIS_AUTOBACKUP_QUOTAS_KEY, quotas.to_pydantic())
@staticmethod
def autobackup_quotas() -> AutobackupQuotas:
quotas_model = hash_as_model(
redis, REDIS_AUTOBACKUP_QUOTAS_KEY, _AutobackupQuotas
)
if quotas_model is None:
unlimited_quotas = AutobackupQuotas(
last=-1,
daily=-1,
weekly=-1,
monthly=-1,
yearly=-1,
)
return unlimited_quotas
return AutobackupQuotas.from_pydantic(quotas_model) # pylint: disable=no-member
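For reference, the constants above translate into the following redis key layout (ids are placeholders):

# backups:provider                     -> hash of BackupProviderModel fields
# backups:autobackup_period            -> autobackup period in minutes
# backups:autobackup_quotas_key        -> hash of _AutobackupQuotas fields
# backups:repo_initted                 -> "1" once the repo is initialized
# backups:snapshots:<snapshot id>      -> cached Snapshot as a hash
# backups:last-backed-up:<service id>  -> last Snapshot for the service, as a hash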

View File

@ -0,0 +1,63 @@
"""
The tasks module contains the worker tasks that are used to back up and restore
"""
from datetime import datetime, timezone
from selfprivacy_api.graphql.common_types.backup import RestoreStrategy, BackupReason
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.utils.huey import huey
from huey import crontab
from selfprivacy_api.services.service import Service
from selfprivacy_api.backup import Backups
SNAPSHOT_CACHE_TTL_HOURS = 6
def validate_datetime(dt: datetime) -> bool:
"""
Validates that it is time to back up.
Also ensures that the timezone-aware time is used.
"""
if dt.tzinfo is None:
return Backups.is_time_to_backup(dt.replace(tzinfo=timezone.utc))
return Backups.is_time_to_backup(dt)
# huey tasks need to return something
@huey.task()
def start_backup(
service: Service, reason: BackupReason = BackupReason.EXPLICIT
) -> bool:
"""
The worker task that starts the backup process.
"""
Backups.back_up(service, reason)
return True
@huey.task()
def restore_snapshot(
snapshot: Snapshot,
strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
) -> bool:
"""
The worker task that starts the restore process.
"""
Backups.restore_snapshot(snapshot, strategy)
return True
@huey.periodic_task(validate_datetime=validate_datetime)
def automatic_backup():
"""
The worker periodic task that starts the automatic backup process.
"""
time = datetime.utcnow().replace(tzinfo=timezone.utc)
for service in Backups.services_to_back_up(time):
start_backup(service, BackupReason.AUTO)
@huey.periodic_task(crontab(hour=SNAPSHOT_CACHE_TTL_HOURS))
def reload_snapshot_cache():
Backups.force_snapshot_cache_reload()
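The tasks above are thin huey wrappers over the controller; calling a @huey.task()-decorated function enqueues it for the worker, which is what the GraphQL mutations below do. A sketch, assuming service and snapshot were obtained as in the earlier controller example:

from selfprivacy_api.backup.tasks import start_backup, restore_snapshot
from selfprivacy_api.graphql.common_types.backup import RestoreStrategy

start_backup(service)                                # queued explicit backup
restore_snapshot(snapshot, RestoreStrategy.INPLACE)  # queued in-place restore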

View File

@ -0,0 +1,35 @@
import subprocess
from os.path import exists
from typing import Generator
def output_yielder(command) -> Generator[str, None, None]:
"""Note: If you break during iteration, it kills the process"""
with subprocess.Popen(
command,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
) as handle:
if handle is None or handle.stdout is None:
raise ValueError("could not run command: ", command)
try:
for line in iter(handle.stdout.readline, ""):
if "NOTICE:" not in line:
yield line
except GeneratorExit:
handle.kill()
def sync(src_path: str, dest_path: str):
"""a wrapper around rclone sync"""
if not exists(src_path):
raise ValueError("source dir for rclone sync must exist")
rclone_command = ["rclone", "sync", "-P", src_path, dest_path]
for raw_message in output_yielder(rclone_command):
if "ERROR" in raw_message:
raise ValueError(raw_message)
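A minimal usage sketch of the two helpers above; the paths are placeholders.

# Stream a long-running command line by line (rclone NOTICE lines are skipped).
for line in output_yielder(["restic", "version"]):
    print(line, end="")

# Mirror a staging folder into place; raises if the source is missing
# or if rclone prints an ERROR line.
sync("/tmp/restore-staging/var/lib/example", "/var/lib/example")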

View File

@ -27,4 +27,4 @@ async def get_token_header(
def get_api_version() -> str:
"""Get API version"""
return "2.1.2"
return "2.4.2"

View File

@ -0,0 +1,36 @@
"""Backup"""
# pylint: disable=too-few-public-methods
from enum import Enum
import strawberry
from pydantic import BaseModel
@strawberry.enum
class RestoreStrategy(Enum):
INPLACE = "INPLACE"
DOWNLOAD_VERIFY_OVERWRITE = "DOWNLOAD_VERIFY_OVERWRITE"
@strawberry.enum
class BackupReason(Enum):
EXPLICIT = "EXPLICIT"
AUTO = "AUTO"
PRE_RESTORE = "PRE_RESTORE"
class _AutobackupQuotas(BaseModel):
last: int
daily: int
weekly: int
monthly: int
yearly: int
@strawberry.experimental.pydantic.type(model=_AutobackupQuotas, all_fields=True)
class AutobackupQuotas:
pass
@strawberry.experimental.pydantic.input(model=_AutobackupQuotas, all_fields=True)
class AutobackupQuotasInput:
pass

View File

@ -12,6 +12,7 @@ class ApiJob:
"""Job type for GraphQL."""
uid: str
type_id: str
name: str
description: str
status: str
@ -28,6 +29,7 @@ def job_to_api_job(job: Job) -> ApiJob:
"""Convert a Job from jobs controller to a GraphQL ApiJob."""
return ApiJob(
uid=str(job.uid),
type_id=job.type_id,
name=job.name,
description=job.description,
status=job.status.name,

View File

@ -1,6 +1,8 @@
from enum import Enum
import typing
import strawberry
import datetime
from selfprivacy_api.graphql.common_types.backup import BackupReason
from selfprivacy_api.graphql.common_types.dns import DnsRecord
from selfprivacy_api.services import get_service_by_id, get_services_by_location
@ -15,7 +17,7 @@ def get_usages(root: "StorageVolume") -> list["StorageUsageInterface"]:
service=service_to_graphql_service(service),
title=service.get_display_name(),
used_space=str(service.get_storage_usage()),
volume=get_volume_by_id(service.get_location()),
volume=get_volume_by_id(service.get_drive()),
)
for service in get_services_by_location(root.name)
]
@ -79,7 +81,7 @@ def get_storage_usage(root: "Service") -> ServiceStorageUsage:
service=service_to_graphql_service(service),
title=service.get_display_name(),
used_space=str(service.get_storage_usage()),
volume=get_volume_by_id(service.get_location()),
volume=get_volume_by_id(service.get_drive()),
)
@ -92,6 +94,8 @@ class Service:
is_movable: bool
is_required: bool
is_enabled: bool
can_be_backed_up: bool
backup_description: str
status: ServiceStatusEnum
url: typing.Optional[str]
dns_records: typing.Optional[typing.List[DnsRecord]]
@ -101,6 +105,18 @@ class Service:
"""Get storage usage for a service"""
return get_storage_usage(self)
@strawberry.field
def backup_snapshots(self) -> typing.Optional[typing.List["SnapshotInfo"]]:
return None
@strawberry.type
class SnapshotInfo:
id: str
service: Service
created_at: datetime.datetime
reason: BackupReason
def service_to_graphql_service(service: ServiceInterface) -> Service:
"""Convert service to graphql service"""
@ -112,6 +128,8 @@ def service_to_graphql_service(service: ServiceInterface) -> Service:
is_movable=service.is_movable(),
is_required=service.is_required(),
is_enabled=service.is_enabled(),
can_be_backed_up=service.can_be_backed_up(),
backup_description=service.get_backup_description(),
status=ServiceStatusEnum(service.get_status().value),
url=service.get_url(),
dns_records=[

View File

@ -0,0 +1,227 @@
import typing
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericMutationReturn,
GenericJobMutationReturn,
MutationReturnInterface,
)
from selfprivacy_api.graphql.queries.backup import BackupConfiguration
from selfprivacy_api.graphql.queries.backup import Backup
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.graphql.common_types.backup import (
AutobackupQuotasInput,
RestoreStrategy,
)
from selfprivacy_api.backup import Backups
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.backup.tasks import start_backup, restore_snapshot
from selfprivacy_api.backup.jobs import add_backup_job, add_restore_job
@strawberry.input
class InitializeRepositoryInput:
"""Initialize repository input"""
provider: BackupProvider
# The following field may become optional for other providers?
# Backblaze takes bucket id and name
location_id: str
location_name: str
# Key ID and key for Backblaze
login: str
password: str
@strawberry.type
class GenericBackupConfigReturn(MutationReturnInterface):
"""Generic backup config return"""
configuration: typing.Optional[BackupConfiguration]
@strawberry.type
class BackupMutations:
@strawberry.mutation(permission_classes=[IsAuthenticated])
def initialize_repository(
self, repository: InitializeRepositoryInput
) -> GenericBackupConfigReturn:
"""Initialize a new repository"""
Backups.set_provider(
kind=repository.provider,
login=repository.login,
key=repository.password,
location=repository.location_name,
repo_id=repository.location_id,
)
Backups.init_repo()
return GenericBackupConfigReturn(
success=True,
message="",
code=200,
configuration=Backup().configuration(),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def remove_repository(self) -> GenericBackupConfigReturn:
"""Remove repository"""
Backups.reset()
return GenericBackupConfigReturn(
success=True,
message="",
code=200,
configuration=Backup().configuration(),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def set_autobackup_period(
self, period: typing.Optional[int] = None
) -> GenericBackupConfigReturn:
"""Set autobackup period. None is to disable autobackup"""
if period is not None:
Backups.set_autobackup_period_minutes(period)
else:
Backups.set_autobackup_period_minutes(0)
return GenericBackupConfigReturn(
success=True,
message="",
code=200,
configuration=Backup().configuration(),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def set_autobackup_quotas(
self, quotas: AutobackupQuotasInput
) -> GenericBackupConfigReturn:
"""
Set autobackup quotas.
Values <=0 for any timeframe mean no limits for that timeframe.
To disable autobackup use autobackup period setting, not this mutation.
"""
try:
Backups.set_autobackup_quotas(quotas)
return GenericBackupConfigReturn(
success=True,
message="",
code=200,
configuration=Backup().configuration(),
)
except Exception as e:
return GenericBackupConfigReturn(
success=False,
message=str(e),
code=400,
configuration=Backup().configuration(),
)
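    # A hedged example of a quotas input (the field names below are an assumption,
    # not taken from this diff; see AutobackupQuotasInput for the real shape).
    # Any value <= 0 leaves that timeframe unlimited:
    #
    #   AutobackupQuotasInput(last=3, daily=7, weekly=4, monthly=6, yearly=-1)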
@strawberry.mutation(permission_classes=[IsAuthenticated])
def start_backup(self, service_id: str) -> GenericJobMutationReturn:
"""Start backup"""
service = get_service_by_id(service_id)
if service is None:
return GenericJobMutationReturn(
success=False,
code=300,
message=f"nonexistent service: {service_id}",
job=None,
)
job = add_backup_job(service)
start_backup(service)
return GenericJobMutationReturn(
success=True,
code=200,
message="Backup job queued",
job=job_to_api_job(job),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def restore_backup(
self,
snapshot_id: str,
strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
) -> GenericJobMutationReturn:
"""Restore backup"""
snap = Backups.get_snapshot_by_id(snapshot_id)
if snap is None:
return GenericJobMutationReturn(
success=False,
code=404,
message=f"No such snapshot: {snapshot_id}",
job=None,
)
service = get_service_by_id(snap.service_name)
if service is None:
return GenericJobMutationReturn(
success=False,
code=404,
message=f"nonexistent service: {snap.service_name}",
job=None,
)
try:
job = add_restore_job(snap)
except ValueError as error:
return GenericJobMutationReturn(
success=False,
code=400,
message=str(error),
job=None,
)
restore_snapshot(snap, strategy)
return GenericJobMutationReturn(
success=True,
code=200,
message="restore job created",
job=job_to_api_job(job),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def forget_snapshot(self, snapshot_id: str) -> GenericMutationReturn:
"""Forget a snapshot.
Makes it inaccessible from the server.
After some time, the (encrypted) data will no longer be recoverable
from the backup server either, but not immediately"""
snap = Backups.get_snapshot_by_id(snapshot_id)
if snap is None:
return GenericMutationReturn(
success=False,
code=404,
message=f"snapshot {snapshot_id} not found",
)
try:
Backups.forget_snapshot(snap)
return GenericMutationReturn(
success=True,
code=200,
message="",
)
except Exception as error:
return GenericMutationReturn(
success=False,
code=400,
message=str(error),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def force_snapshots_reload(self) -> GenericMutationReturn:
"""Force snapshots reload"""
Backups.force_snapshot_cache_reload()
return GenericMutationReturn(
success=True,
code=200,
message="",
)


@ -0,0 +1,215 @@
"""Deprecated mutations
A mistake was made: mutations were not grouped, and were instead
placed in the root of the mutations schema. In this file, we import all the
mutations from their respective modules and provide them to the root for backwards compatibility.
"""
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.user import UserMutationReturn
from selfprivacy_api.graphql.mutations.api_mutations import (
ApiKeyMutationReturn,
ApiMutations,
DeviceApiTokenMutationReturn,
)
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericJobMutationReturn,
GenericMutationReturn,
)
from selfprivacy_api.graphql.mutations.services_mutations import (
ServiceMutationReturn,
ServicesMutations,
)
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
from selfprivacy_api.graphql.mutations.system_mutations import (
AutoUpgradeSettingsMutationReturn,
SystemMutations,
TimezoneMutationReturn,
)
from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations
def deprecated_mutation(func, group, auth=True):
return strawberry.mutation(
resolver=func,
permission_classes=[IsAuthenticated] if auth else [],
deprecation_reason=f"Use `{group}.{func.__name__}` instead",
)
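# For example, the first field of DeprecatedApiMutations below is roughly
# equivalent to writing by hand:
#
#   get_new_recovery_api_key = strawberry.mutation(
#       resolver=ApiMutations.get_new_recovery_api_key,
#       permission_classes=[IsAuthenticated],
#       deprecation_reason="Use `api.get_new_recovery_api_key` instead",
#   )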
@strawberry.type
class DeprecatedApiMutations:
get_new_recovery_api_key: ApiKeyMutationReturn = deprecated_mutation(
ApiMutations.get_new_recovery_api_key,
"api",
)
use_recovery_api_key: DeviceApiTokenMutationReturn = deprecated_mutation(
ApiMutations.use_recovery_api_key,
"api",
auth=False,
)
refresh_device_api_token: DeviceApiTokenMutationReturn = deprecated_mutation(
ApiMutations.refresh_device_api_token,
"api",
)
delete_device_api_token: GenericMutationReturn = deprecated_mutation(
ApiMutations.delete_device_api_token,
"api",
)
get_new_device_api_key: ApiKeyMutationReturn = deprecated_mutation(
ApiMutations.get_new_device_api_key,
"api",
)
invalidate_new_device_api_key: GenericMutationReturn = deprecated_mutation(
ApiMutations.invalidate_new_device_api_key,
"api",
)
authorize_with_new_device_api_key: DeviceApiTokenMutationReturn = (
deprecated_mutation(
ApiMutations.authorize_with_new_device_api_key,
"api",
auth=False,
)
)
@strawberry.type
class DeprecatedSystemMutations:
change_timezone: TimezoneMutationReturn = deprecated_mutation(
SystemMutations.change_timezone,
"system",
)
change_auto_upgrade_settings: AutoUpgradeSettingsMutationReturn = (
deprecated_mutation(
SystemMutations.change_auto_upgrade_settings,
"system",
)
)
run_system_rebuild: GenericMutationReturn = deprecated_mutation(
SystemMutations.run_system_rebuild,
"system",
)
run_system_rollback: GenericMutationReturn = deprecated_mutation(
SystemMutations.run_system_rollback,
"system",
)
run_system_upgrade: GenericMutationReturn = deprecated_mutation(
SystemMutations.run_system_upgrade,
"system",
)
reboot_system: GenericMutationReturn = deprecated_mutation(
SystemMutations.reboot_system,
"system",
)
pull_repository_changes: GenericMutationReturn = deprecated_mutation(
SystemMutations.pull_repository_changes,
"system",
)
@strawberry.type
class DeprecatedUsersMutations:
create_user: UserMutationReturn = deprecated_mutation(
UsersMutations.create_user,
"users",
)
delete_user: GenericMutationReturn = deprecated_mutation(
UsersMutations.delete_user,
"users",
)
update_user: UserMutationReturn = deprecated_mutation(
UsersMutations.update_user,
"users",
)
add_ssh_key: UserMutationReturn = deprecated_mutation(
UsersMutations.add_ssh_key,
"users",
)
remove_ssh_key: UserMutationReturn = deprecated_mutation(
UsersMutations.remove_ssh_key,
"users",
)
@strawberry.type
class DeprecatedStorageMutations:
resize_volume: GenericMutationReturn = deprecated_mutation(
StorageMutations.resize_volume,
"storage",
)
mount_volume: GenericMutationReturn = deprecated_mutation(
StorageMutations.mount_volume,
"storage",
)
unmount_volume: GenericMutationReturn = deprecated_mutation(
StorageMutations.unmount_volume,
"storage",
)
migrate_to_binds: GenericJobMutationReturn = deprecated_mutation(
StorageMutations.migrate_to_binds,
"storage",
)
@strawberry.type
class DeprecatedServicesMutations:
enable_service: ServiceMutationReturn = deprecated_mutation(
ServicesMutations.enable_service,
"services",
)
disable_service: ServiceMutationReturn = deprecated_mutation(
ServicesMutations.disable_service,
"services",
)
stop_service: ServiceMutationReturn = deprecated_mutation(
ServicesMutations.stop_service,
"services",
)
start_service: ServiceMutationReturn = deprecated_mutation(
ServicesMutations.start_service,
"services",
)
restart_service: ServiceMutationReturn = deprecated_mutation(
ServicesMutations.restart_service,
"services",
)
move_service: ServiceMutationReturn = deprecated_mutation(
ServicesMutations.move_service,
"services",
)
@strawberry.type
class DeprecatedJobMutations:
remove_job: GenericMutationReturn = deprecated_mutation(
JobMutations.remove_job,
"jobs",
)


@ -17,5 +17,5 @@ class GenericMutationReturn(MutationReturnInterface):
@strawberry.type
class GenericJobButationReturn(MutationReturnInterface):
class GenericJobMutationReturn(MutationReturnInterface):
job: typing.Optional[ApiJob] = None

View File

@ -10,7 +10,7 @@ from selfprivacy_api.graphql.common_types.service import (
service_to_graphql_service,
)
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericJobButationReturn,
GenericJobMutationReturn,
GenericMutationReturn,
)
@ -34,7 +34,7 @@ class MoveServiceInput:
@strawberry.type
class ServiceJobMutationReturn(GenericJobButationReturn):
class ServiceJobMutationReturn(GenericJobMutationReturn):
"""Service job mutation return type."""
service: typing.Optional[Service] = None


@ -1,102 +0,0 @@
#!/usr/bin/env python3
"""Users management module"""
# pylint: disable=too-few-public-methods
import strawberry
from selfprivacy_api.actions.users import UserNotFound
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.actions.ssh import (
InvalidPublicKey,
KeyAlreadyExists,
KeyNotFound,
create_ssh_key,
remove_ssh_key,
)
from selfprivacy_api.graphql.common_types.user import (
UserMutationReturn,
get_user_by_username,
)
@strawberry.input
class SshMutationInput:
"""Input type for ssh mutation"""
username: str
ssh_key: str
@strawberry.type
class SshMutations:
"""Mutations ssh"""
@strawberry.mutation(permission_classes=[IsAuthenticated])
def add_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
"""Add a new ssh key"""
try:
create_ssh_key(ssh_input.username, ssh_input.ssh_key)
except KeyAlreadyExists:
return UserMutationReturn(
success=False,
message="Key already exists",
code=409,
)
except InvalidPublicKey:
return UserMutationReturn(
success=False,
message="Invalid key type. Only ssh-ed25519 and ssh-rsa are supported",
code=400,
)
except UserNotFound:
return UserMutationReturn(
success=False,
message="User not found",
code=404,
)
except Exception as e:
return UserMutationReturn(
success=False,
message=str(e),
code=500,
)
return UserMutationReturn(
success=True,
message="New SSH key successfully written",
code=201,
user=get_user_by_username(ssh_input.username),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def remove_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
"""Remove ssh key from user"""
try:
remove_ssh_key(ssh_input.username, ssh_input.ssh_key)
except KeyNotFound:
return UserMutationReturn(
success=False,
message="Key not found",
code=404,
)
except UserNotFound:
return UserMutationReturn(
success=False,
message="User not found",
code=404,
)
except Exception as e:
return UserMutationReturn(
success=False,
message=str(e),
code=500,
)
return UserMutationReturn(
success=True,
message="SSH key successfully removed",
code=200,
user=get_user_by_username(ssh_input.username),
)


@ -4,7 +4,7 @@ from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericJobButationReturn,
GenericJobMutationReturn,
GenericMutationReturn,
)
from selfprivacy_api.jobs.migrate_to_binds import (
@ -79,10 +79,10 @@ class StorageMutations:
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def migrate_to_binds(self, input: MigrateToBindsInput) -> GenericJobButationReturn:
def migrate_to_binds(self, input: MigrateToBindsInput) -> GenericJobMutationReturn:
"""Migrate to binds"""
if is_bind_migrated():
return GenericJobButationReturn(
return GenericJobMutationReturn(
success=False, code=409, message="Already migrated to binds"
)
job = start_bind_migration(
@ -94,7 +94,7 @@ class StorageMutations:
pleroma_block_device=input.pleroma_block_device,
)
)
return GenericJobButationReturn(
return GenericJobMutationReturn(
success=True,
code=200,
message="Migration to binds started, rebuild the system to apply changes",


@ -3,10 +3,18 @@
# pylint: disable=too-few-public-methods
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.actions.users import UserNotFound
from selfprivacy_api.graphql.common_types.user import (
UserMutationReturn,
get_user_by_username,
)
from selfprivacy_api.actions.ssh import (
InvalidPublicKey,
KeyAlreadyExists,
KeyNotFound,
create_ssh_key,
remove_ssh_key,
)
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericMutationReturn,
)
@ -21,8 +29,16 @@ class UserMutationInput:
password: str
@strawberry.input
class SshMutationInput:
"""Input type for ssh mutation"""
username: str
ssh_key: str
@strawberry.type
class UserMutations:
class UsersMutations:
"""Mutations change user settings"""
@strawberry.mutation(permission_classes=[IsAuthenticated])
@ -115,3 +131,73 @@ class UserMutations:
code=200,
user=get_user_by_username(user.username),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def add_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
"""Add a new ssh key"""
try:
create_ssh_key(ssh_input.username, ssh_input.ssh_key)
except KeyAlreadyExists:
return UserMutationReturn(
success=False,
message="Key already exists",
code=409,
)
except InvalidPublicKey:
return UserMutationReturn(
success=False,
message="Invalid key type. Only ssh-ed25519, ssh-rsa and ecdsa are supported",
code=400,
)
except UserNotFound:
return UserMutationReturn(
success=False,
message="User not found",
code=404,
)
except Exception as e:
return UserMutationReturn(
success=False,
message=str(e),
code=500,
)
return UserMutationReturn(
success=True,
message="New SSH key successfully written",
code=201,
user=get_user_by_username(ssh_input.username),
)
@strawberry.mutation(permission_classes=[IsAuthenticated])
def remove_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
"""Remove ssh key from user"""
try:
remove_ssh_key(ssh_input.username, ssh_input.ssh_key)
except KeyNotFound:
return UserMutationReturn(
success=False,
message="Key not found",
code=404,
)
except UserNotFound:
return UserMutationReturn(
success=False,
message="User not found",
code=404,
)
except Exception as e:
return UserMutationReturn(
success=False,
message=str(e),
code=500,
)
return UserMutationReturn(
success=True,
message="SSH key successfully removed",
code=200,
user=get_user_by_username(ssh_input.username),
)


@ -0,0 +1,83 @@
"""Backup"""
# pylint: disable=too-few-public-methods
import typing
import strawberry
from selfprivacy_api.backup import Backups
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.service import (
Service,
ServiceStatusEnum,
SnapshotInfo,
service_to_graphql_service,
)
from selfprivacy_api.graphql.common_types.backup import AutobackupQuotas
from selfprivacy_api.services import get_service_by_id
@strawberry.type
class BackupConfiguration:
provider: BackupProvider
# When server is lost, the app should have the key to decrypt backups
# on a new server
encryption_key: str
# False when repo is not initialized and not ready to be used
is_initialized: bool
# If none, autobackups are disabled
autobackup_period: typing.Optional[int]
# None is equal to all quotas being unlimited (-1). Optional for compatibility reasons.
autobackup_quotas: AutobackupQuotas
# Bucket name for Backblaze, path for some other providers
location_name: typing.Optional[str]
location_id: typing.Optional[str]
@strawberry.type
class Backup:
@strawberry.field
def configuration(self) -> BackupConfiguration:
return BackupConfiguration(
provider=Backups.provider().name,
encryption_key=LocalBackupSecret.get(),
is_initialized=Backups.is_initted(),
autobackup_period=Backups.autobackup_period_minutes(),
location_name=Backups.provider().location,
location_id=Backups.provider().repo_id,
autobackup_quotas=Backups.autobackup_quotas(),
)
@strawberry.field
def all_snapshots(self) -> typing.List[SnapshotInfo]:
if not Backups.is_initted():
return []
result = []
snapshots = Backups.get_all_snapshots()
for snap in snapshots:
service = get_service_by_id(snap.service_name)
if service is None:
service = Service(
id=snap.service_name,
display_name=f"{snap.service_name} (Orphaned)",
description="",
svg_icon="",
is_movable=False,
is_required=False,
is_enabled=False,
status=ServiceStatusEnum.OFF,
url=None,
dns_records=None,
can_be_backed_up=False,
backup_description="",
)
else:
service = service_to_graphql_service(service)
graphql_snap = SnapshotInfo(
id=snap.id,
service=service,
created_at=snap.created_at,
reason=snap.reason,
)
result.append(graphql_snap)
return result


@ -19,3 +19,7 @@ class ServerProvider(Enum):
@strawberry.enum
class BackupProvider(Enum):
BACKBLAZE = "BACKBLAZE"
NONE = "NONE"
# for testing purposes, make sure not selectable in prod.
MEMORY = "MEMORY"
FILE = "FILE"


@ -23,7 +23,7 @@ class Storage:
else str(volume.size),
free_space=str(volume.fsavail),
used_space=str(volume.fsused),
root=volume.name == "sda1",
root=volume.is_root(),
name=volume.name,
model=volume.model,
serial=volume.serial,


@ -5,21 +5,30 @@ import asyncio
from typing import AsyncGenerator
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.deprecated_mutations import (
DeprecatedApiMutations,
DeprecatedJobMutations,
DeprecatedServicesMutations,
DeprecatedStorageMutations,
DeprecatedSystemMutations,
DeprecatedUsersMutations,
)
from selfprivacy_api.graphql.mutations.api_mutations import ApiMutations
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
from selfprivacy_api.graphql.mutations.mutation_interface import GenericMutationReturn
from selfprivacy_api.graphql.mutations.services_mutations import ServicesMutations
from selfprivacy_api.graphql.mutations.ssh_mutations import SshMutations
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
from selfprivacy_api.graphql.mutations.system_mutations import SystemMutations
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
from selfprivacy_api.graphql.queries.api_queries import Api
from selfprivacy_api.graphql.queries.backup import Backup
from selfprivacy_api.graphql.queries.jobs import Job
from selfprivacy_api.graphql.queries.services import Services
from selfprivacy_api.graphql.queries.storage import Storage
from selfprivacy_api.graphql.queries.system import System
from selfprivacy_api.graphql.mutations.users_mutations import UserMutations
from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations
from selfprivacy_api.graphql.queries.users import Users
from selfprivacy_api.jobs.test import test_job
@ -28,16 +37,16 @@ from selfprivacy_api.jobs.test import test_job
class Query:
"""Root schema for queries"""
@strawberry.field(permission_classes=[IsAuthenticated])
def system(self) -> System:
"""System queries"""
return System()
@strawberry.field
def api(self) -> Api:
"""API access status"""
return Api()
@strawberry.field(permission_classes=[IsAuthenticated])
def system(self) -> System:
"""System queries"""
return System()
@strawberry.field(permission_classes=[IsAuthenticated])
def users(self) -> Users:
"""Users queries"""
@ -58,19 +67,58 @@ class Query:
"""Services queries"""
return Services()
@strawberry.field(permission_classes=[IsAuthenticated])
def backup(self) -> Backup:
"""Backup queries"""
return Backup()
@strawberry.type
class Mutation(
ApiMutations,
SystemMutations,
UserMutations,
SshMutations,
StorageMutations,
ServicesMutations,
JobMutations,
DeprecatedApiMutations,
DeprecatedSystemMutations,
DeprecatedUsersMutations,
DeprecatedStorageMutations,
DeprecatedServicesMutations,
DeprecatedJobMutations,
):
"""Root schema for mutations"""
@strawberry.field
def api(self) -> ApiMutations:
"""API mutations"""
return ApiMutations()
@strawberry.field(permission_classes=[IsAuthenticated])
def system(self) -> SystemMutations:
"""System mutations"""
return SystemMutations()
@strawberry.field(permission_classes=[IsAuthenticated])
def users(self) -> UsersMutations:
"""Users mutations"""
return UsersMutations()
@strawberry.field(permission_classes=[IsAuthenticated])
def storage(self) -> StorageMutations:
"""Storage mutations"""
return StorageMutations()
@strawberry.field(permission_classes=[IsAuthenticated])
def services(self) -> ServicesMutations:
"""Services mutations"""
return ServicesMutations()
@strawberry.field(permission_classes=[IsAuthenticated])
def jobs(self) -> JobMutations:
"""Jobs mutations"""
return JobMutations()
@strawberry.field(permission_classes=[IsAuthenticated])
def backup(self) -> BackupMutations:
"""Backup mutations"""
return BackupMutations()
@strawberry.mutation(permission_classes=[IsAuthenticated])
def test_mutation(self) -> GenericMutationReturn:
"""Test mutation"""
@ -95,4 +143,8 @@ class Subscription:
await asyncio.sleep(0.5)
schema = strawberry.Schema(query=Query, mutation=Mutation, subscription=Subscription)
schema = strawberry.Schema(
query=Query,
mutation=Mutation,
subscription=Subscription,
)


@ -26,8 +26,11 @@ from selfprivacy_api.utils.redis_pool import RedisPool
JOB_EXPIRATION_SECONDS = 10 * 24 * 60 * 60 # ten days
STATUS_LOGS_PREFIX = "jobs_logs:status:"
PROGRESS_LOGS_PREFIX = "jobs_logs:progress:"
class JobStatus(Enum):
class JobStatus(str, Enum):
"""
Status of a job.
"""
@ -70,6 +73,7 @@ class Jobs:
jobs = Jobs.get_jobs()
for job in jobs:
Jobs.remove(job)
Jobs.reset_logs()
@staticmethod
def add(
@ -120,6 +124,60 @@ class Jobs:
return True
return False
@staticmethod
def reset_logs() -> None:
redis = RedisPool().get_connection()
for key in redis.keys(STATUS_LOGS_PREFIX + "*"):
redis.delete(key)
@staticmethod
def log_status_update(job: Job, status: JobStatus) -> None:
redis = RedisPool().get_connection()
key = _status_log_key_from_uuid(job.uid)
redis.lpush(key, status.value)
redis.expire(key, 10)
@staticmethod
def log_progress_update(job: Job, progress: int) -> None:
redis = RedisPool().get_connection()
key = _progress_log_key_from_uuid(job.uid)
redis.lpush(key, progress)
redis.expire(key, 10)
@staticmethod
def status_updates(job: Job) -> list[JobStatus]:
result: list[JobStatus] = []
redis = RedisPool().get_connection()
key = _status_log_key_from_uuid(job.uid)
if not redis.exists(key):
return []
status_strings: list[str] = redis.lrange(key, 0, -1) # type: ignore
for status in status_strings:
try:
result.append(JobStatus[status])
except KeyError as error:
raise ValueError("impossible job status: " + status) from error
return result
@staticmethod
def progress_updates(job: Job) -> list[int]:
result: list[int] = []
redis = RedisPool().get_connection()
key = _progress_log_key_from_uuid(job.uid)
if not redis.exists(key):
return []
progress_strings: list[str] = redis.lrange(key, 0, -1) # type: ignore
for progress in progress_strings:
try:
result.append(int(progress))
except ValueError as error:
raise ValueError("impossible job progress: " + progress) from error
return result
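    # A rough usage sketch of the log helpers above, given an already-created job:
    #
    #   Jobs.log_progress_update(job, 50)
    #   Jobs.log_status_update(job, JobStatus.FINISHED)
    #   Jobs.progress_updates(job)  # -> [50]
    #   Jobs.status_updates(job)    # -> [JobStatus.FINISHED]
    #
    # Entries are written with LPUSH and read back with LRANGE, so when there are
    # several updates the most recent one comes first.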
@staticmethod
def update(
job: Job,
@ -140,9 +198,17 @@ class Jobs:
job.description = description
if status_text is not None:
job.status_text = status_text
if progress is not None:
# if it is finished it is 100
# unless user says otherwise
if status == JobStatus.FINISHED and progress is None:
progress = 100
if progress is not None and job.progress != progress:
job.progress = progress
Jobs.log_progress_update(job, progress)
job.status = status
Jobs.log_status_update(job, status)
job.updated_at = datetime.datetime.now()
job.error = error
job.result = result
@ -158,6 +224,14 @@ class Jobs:
return job
@staticmethod
def set_expiration(job: Job, expiration_seconds: int) -> Job:
redis = RedisPool().get_connection()
key = _redis_key_from_uuid(job.uid)
if redis.exists(key):
redis.expire(key, expiration_seconds)
return job
@staticmethod
def get_job(uid: str) -> typing.Optional[Job]:
"""
@ -194,11 +268,19 @@ class Jobs:
return False
def _redis_key_from_uuid(uuid_string):
def _redis_key_from_uuid(uuid_string) -> str:
return "jobs:" + str(uuid_string)
def _store_job_as_hash(redis, redis_key, model):
def _status_log_key_from_uuid(uuid_string) -> str:
return STATUS_LOGS_PREFIX + str(uuid_string)
def _progress_log_key_from_uuid(uuid_string) -> str:
return PROGRESS_LOGS_PREFIX + str(uuid_string)
def _store_job_as_hash(redis, redis_key, model) -> None:
for key, value in model.dict().items():
if isinstance(value, uuid.UUID):
value = str(value)
@ -209,7 +291,7 @@ def _store_job_as_hash(redis, redis_key, model):
redis.hset(redis_key, key, str(value))
def _job_from_hash(redis, redis_key):
def _job_from_hash(redis, redis_key) -> typing.Optional[Job]:
if redis.exists(redis_key):
job_dict = redis.hgetall(redis_key)
for date in [


@ -22,6 +22,9 @@ from selfprivacy_api.migrations.providers import CreateProviderFields
from selfprivacy_api.migrations.prepare_for_nixos_2211 import (
MigrateToSelfprivacyChannelFrom2205,
)
from selfprivacy_api.migrations.prepare_for_nixos_2305 import (
MigrateToSelfprivacyChannelFrom2211,
)
from selfprivacy_api.migrations.redis_tokens import LoadTokensToRedis
migrations = [
@ -32,6 +35,7 @@ migrations = [
CheckForFailedBindsMigration(),
CreateProviderFields(),
MigrateToSelfprivacyChannelFrom2205(),
MigrateToSelfprivacyChannelFrom2211(),
LoadTokensToRedis(),
]


@ -0,0 +1,58 @@
import os
import subprocess
from selfprivacy_api.migrations.migration import Migration
class MigrateToSelfprivacyChannelFrom2211(Migration):
"""Migrate to selfprivacy Nix channel.
For some reason, NixOS 22.11 servers were initialized with the nixos channel instead of the selfprivacy one.
This stops us from upgrading to NixOS 23.05.
"""
def get_migration_name(self):
return "migrate_to_selfprivacy_channel_from_2211"
def get_migration_description(self):
return "Migrate to selfprivacy Nix channel from NixOS 22.11."
def is_migration_needed(self):
try:
output = subprocess.check_output(
["nix-channel", "--list"], start_new_session=True
)
output = output.decode("utf-8")
first_line = output.split("\n", maxsplit=1)[0]
return first_line.startswith("nixos") and (
first_line.endswith("nixos-22.11")
)
except subprocess.CalledProcessError:
return False
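    # `nix-channel --list` prints one "<name> <url>" pair per line, e.g.
    # (illustrative output, not captured from a real server):
    #
    #   nixos https://nixos.org/channels/nixos-22.11
    #
    # so the check above only needs the first line: the channel is still named
    # "nixos" and still points at the 22.11 release.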
def migrate(self):
# Change the channel and update them.
# Also, go to /etc/nixos directory and make a git pull
current_working_directory = os.getcwd()
try:
print("Changing channel")
os.chdir("/etc/nixos")
subprocess.check_output(
[
"nix-channel",
"--add",
"https://channel.selfprivacy.org/nixos-selfpricacy",
"nixos",
]
)
subprocess.check_output(["nix-channel", "--update"])
nixos_config_branch = subprocess.check_output(
["git", "rev-parse", "--abbrev-ref", "HEAD"], start_new_session=True
)
if nixos_config_branch.decode("utf-8").strip() == "api-redis":
print("Also changing nixos-config branch from api-redis to master")
subprocess.check_output(["git", "checkout", "master"])
subprocess.check_output(["git", "pull"])
os.chdir(current_working_directory)
except subprocess.CalledProcessError:
os.chdir(current_working_directory)
print("Error")


@ -0,0 +1,11 @@
"""for storage in Redis"""
from pydantic import BaseModel
class BackupProviderModel(BaseModel):
kind: str
login: str
key: str
location: str
repo_id: str # for app usage, not for us


@ -0,0 +1,11 @@
import datetime
from pydantic import BaseModel
from selfprivacy_api.graphql.common_types.backup import BackupReason
class Snapshot(BaseModel):
id: str
service_name: str
created_at: datetime.datetime
reason: BackupReason = BackupReason.EXPLICIT


@ -1,8 +1,8 @@
"""
Token repository using Redis as backend.
"""
from typing import Optional
from datetime import datetime, timezone
from typing import Any, Optional
from datetime import datetime
from hashlib import md5
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
@ -29,15 +29,15 @@ class RedisTokensRepository(AbstractTokensRepository):
@staticmethod
def token_key_for_device(device_name: str):
hash = md5()
hash.update(bytes(device_name, "utf-8"))
digest = hash.hexdigest()
md5_hash = md5()
md5_hash.update(bytes(device_name, "utf-8"))
digest = md5_hash.hexdigest()
return TOKENS_PREFIX + digest
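    # Sketch of the key this derives for a device named "main-laptop"
    # (digest not precomputed here; TOKENS_PREFIX is defined earlier in this module):
    #
    #   RedisTokensRepository.token_key_for_device("main-laptop")
    #   # == TOKENS_PREFIX + md5(b"main-laptop").hexdigest()
    #
    # Hashing keeps arbitrary device names safe to embed in a Redis key.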
def get_tokens(self) -> list[Token]:
"""Get the tokens"""
redis = self.connection
token_keys = redis.keys(TOKENS_PREFIX + "*")
token_keys: list[str] = redis.keys(TOKENS_PREFIX + "*") # type: ignore
tokens = []
for key in token_keys:
token = self._token_from_hash(key)
@ -45,10 +45,10 @@ class RedisTokensRepository(AbstractTokensRepository):
tokens.append(token)
return tokens
def _discover_token_key(self, input_token: Token) -> str:
def _discover_token_key(self, input_token: Token) -> Optional[str]:
"""brute-force searching for tokens, for robust deletion"""
redis = self.connection
token_keys = redis.keys(TOKENS_PREFIX + "*")
token_keys: list[str] = redis.keys(TOKENS_PREFIX + "*") # type: ignore
for key in token_keys:
token = self._token_from_hash(key)
if token == input_token:
@ -111,26 +111,28 @@ class RedisTokensRepository(AbstractTokensRepository):
return self._new_device_key_from_hash(NEW_DEVICE_KEY_REDIS_KEY)
@staticmethod
def _is_date_key(key: str):
def _is_date_key(key: str) -> bool:
return key in [
"created_at",
"expires_at",
]
@staticmethod
def _prepare_model_dict(d: dict):
date_keys = [key for key in d.keys() if RedisTokensRepository._is_date_key(key)]
def _prepare_model_dict(model_dict: dict[str, Any]) -> None:
date_keys = [
key for key in model_dict.keys() if RedisTokensRepository._is_date_key(key)
]
for date in date_keys:
if d[date] != "None":
d[date] = datetime.fromisoformat(d[date])
for key in d.keys():
if d[key] == "None":
d[key] = None
if model_dict[date] != "None":
model_dict[date] = datetime.fromisoformat(model_dict[date])
for key in model_dict.keys():
if model_dict[key] == "None":
model_dict[key] = None
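    # An illustrative round trip for the normalization above:
    #
    #   d = {"created_at": "2022-01-14T08:31:10", "expires_at": "None"}
    #   RedisTokensRepository._prepare_model_dict(d)
    #   # d == {"created_at": datetime(2022, 1, 14, 8, 31, 10), "expires_at": None}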
def _model_dict_from_hash(self, redis_key: str) -> Optional[dict]:
def _model_dict_from_hash(self, redis_key: str) -> Optional[dict[str, Any]]:
redis = self.connection
if redis.exists(redis_key):
token_dict = redis.hgetall(redis_key)
token_dict: dict[str, Any] = redis.hgetall(redis_key) # type: ignore
RedisTokensRepository._prepare_model_dict(token_dict)
return token_dict
return None


@ -16,8 +16,6 @@ from selfprivacy_api.actions.ssh import (
from selfprivacy_api.actions.users import UserNotFound, get_user_by_username
from selfprivacy_api.dependencies import get_token_header
from selfprivacy_api.restic_controller import ResticController, ResticStates
from selfprivacy_api.restic_controller import tasks as restic_tasks
from selfprivacy_api.services.bitwarden import Bitwarden
from selfprivacy_api.services.gitea import Gitea
from selfprivacy_api.services.mailserver import MailServer
@ -25,7 +23,7 @@ from selfprivacy_api.services.nextcloud import Nextcloud
from selfprivacy_api.services.ocserv import Ocserv
from selfprivacy_api.services.pleroma import Pleroma
from selfprivacy_api.services.service import ServiceStatus
from selfprivacy_api.utils import WriteUserData, get_dkim_key, get_domain
from selfprivacy_api.utils import get_dkim_key, get_domain
router = APIRouter(
prefix="/services",
@ -186,44 +184,34 @@ async def disable_pleroma():
@router.get("/restic/backup/list")
async def get_restic_backup_list():
restic = ResticController()
return restic.snapshot_list
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
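    # All of these legacy restic endpoints now answer with the same response,
    # which FastAPI renders roughly as (illustrative):
    #
    #   HTTP/1.1 410 Gone
    #   {"detail": "This endpoint is deprecated, please use GraphQL API"}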
@router.put("/restic/backup/create")
async def create_restic_backup():
restic = ResticController()
if restic.state is ResticStates.NO_KEY:
raise HTTPException(status_code=400, detail="Backup key not provided")
if restic.state is ResticStates.INITIALIZING:
raise HTTPException(status_code=400, detail="Backup is initializing")
if restic.state is ResticStates.BACKING_UP:
raise HTTPException(status_code=409, detail="Backup is already running")
restic_tasks.start_backup()
return {
"status": 0,
"message": "Backup creation has started",
}
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
@router.get("/restic/backup/status")
async def get_restic_backup_status():
restic = ResticController()
return {
"status": restic.state.name,
"progress": restic.progress,
"error_message": restic.error_message,
}
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
@router.get("/restic/backup/reload")
async def reload_restic_backup():
restic_tasks.load_snapshots()
return {
"status": 0,
"message": "Snapshots reload started",
}
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
class BackupRestoreInput(BaseModel):
@ -232,29 +220,10 @@ class BackupRestoreInput(BaseModel):
@router.put("/restic/backup/restore")
async def restore_restic_backup(backup: BackupRestoreInput):
restic = ResticController()
if restic.state is ResticStates.NO_KEY:
raise HTTPException(status_code=400, detail="Backup key not provided")
if restic.state is ResticStates.NOT_INITIALIZED:
raise HTTPException(
status_code=400, detail="Backups repository is not initialized"
)
if restic.state is ResticStates.BACKING_UP:
raise HTTPException(status_code=409, detail="Backup is already running")
if restic.state is ResticStates.INITIALIZING:
raise HTTPException(status_code=400, detail="Repository is initializing")
if restic.state is ResticStates.RESTORING:
raise HTTPException(status_code=409, detail="Restore is already running")
for backup_item in restic.snapshot_list:
if backup_item["short_id"] == backup.backupId:
restic_tasks.restore_from_backup(backup.backupId)
return {
"status": 0,
"message": "Backup restoration procedure started",
}
raise HTTPException(status_code=404, detail="Backup not found")
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
class BackupConfigInput(BaseModel):
@ -265,17 +234,10 @@ class BackupConfigInput(BaseModel):
@router.put("/restic/backblaze/config")
async def set_backblaze_config(backup_config: BackupConfigInput):
with WriteUserData() as data:
if "backup" not in data:
data["backup"] = {}
data["backup"]["provider"] = "BACKBLAZE"
data["backup"]["accountId"] = backup_config.accountId
data["backup"]["accountKey"] = backup_config.accountKey
data["backup"]["bucket"] = backup_config.bucket
restic_tasks.update_keys_from_userdata()
return "New backup settings saved"
raise HTTPException(
status_code=410,
detail="This endpoint is deprecated, please use GraphQL API",
)
@router.post("/ssh/enable")


@ -1,244 +0,0 @@
"""Restic singleton controller."""
from datetime import datetime
import json
import subprocess
import os
from threading import Lock
from enum import Enum
import portalocker
from selfprivacy_api.utils import ReadUserData
from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass
class ResticStates(Enum):
"""Restic states enum."""
NO_KEY = 0
NOT_INITIALIZED = 1
INITIALIZED = 2
BACKING_UP = 3
RESTORING = 4
ERROR = 5
INITIALIZING = 6
class ResticController(metaclass=SingletonMetaclass):
"""
States in which the restic_controller may be
- no backblaze key
- backblaze key is provided, but repository is not initialized
- backblaze key is provided, repository is initialized
- fetching list of snapshots
- creating snapshot, current progress can be retrieved
- recovering from snapshot
Any ongoing operation acquires the lock
Current state can be fetched with get_state()
"""
_initialized = False
def __init__(self):
if self._initialized:
return
self.state = ResticStates.NO_KEY
self.lock = False
self.progress = 0
self._backblaze_account = None
self._backblaze_key = None
self._repository_name = None
self.snapshot_list = []
self.error_message = None
self._initialized = True
self.load_configuration()
self.write_rclone_config()
self.load_snapshots()
def load_configuration(self):
"""Load current configuration from user data to singleton."""
with ReadUserData() as user_data:
self._backblaze_account = user_data["backblaze"]["accountId"]
self._backblaze_key = user_data["backblaze"]["accountKey"]
self._repository_name = user_data["backblaze"]["bucket"]
if self._backblaze_account and self._backblaze_key and self._repository_name:
self.state = ResticStates.INITIALIZING
else:
self.state = ResticStates.NO_KEY
def write_rclone_config(self):
"""
Open /root/.config/rclone/rclone.conf with portalocker
and write configuration in the following format:
[backblaze]
type = b2
account = {self.backblaze_account}
key = {self.backblaze_key}
"""
with portalocker.Lock(
"/root/.config/rclone/rclone.conf", "w", timeout=None
) as rclone_config:
rclone_config.write(
f"[backblaze]\n"
f"type = b2\n"
f"account = {self._backblaze_account}\n"
f"key = {self._backblaze_key}\n"
)
def load_snapshots(self):
"""
Load list of snapshots from repository
"""
backup_listing_command = [
"restic",
"-o",
"rclone.args=serve restic --stdio",
"-r",
f"rclone:backblaze:{self._repository_name}/sfbackup",
"snapshots",
"--json",
]
if self.state in (ResticStates.BACKING_UP, ResticStates.RESTORING):
return
with subprocess.Popen(
backup_listing_command,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as backup_listing_process_descriptor:
snapshots_list = backup_listing_process_descriptor.communicate()[0].decode(
"utf-8"
)
try:
starting_index = snapshots_list.find("[")
json.loads(snapshots_list[starting_index:])
self.snapshot_list = json.loads(snapshots_list[starting_index:])
self.state = ResticStates.INITIALIZED
print(snapshots_list)
except ValueError:
if "Is there a repository at the following location?" in snapshots_list:
self.state = ResticStates.NOT_INITIALIZED
return
self.state = ResticStates.ERROR
self.error_message = snapshots_list
return
def initialize_repository(self):
"""
Initialize repository with restic
"""
initialize_repository_command = [
"restic",
"-o",
"rclone.args=serve restic --stdio",
"-r",
f"rclone:backblaze:{self._repository_name}/sfbackup",
"init",
]
with subprocess.Popen(
initialize_repository_command,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as initialize_repository_process_descriptor:
msg = initialize_repository_process_descriptor.communicate()[0].decode(
"utf-8"
)
if initialize_repository_process_descriptor.returncode == 0:
self.state = ResticStates.INITIALIZED
else:
self.state = ResticStates.ERROR
self.error_message = msg
self.state = ResticStates.INITIALIZED
def start_backup(self):
"""
Start backup with restic
"""
backup_command = [
"restic",
"-o",
"rclone.args=serve restic --stdio",
"-r",
f"rclone:backblaze:{self._repository_name}/sfbackup",
"--verbose",
"--json",
"backup",
"/var",
]
with open("/var/backup.log", "w", encoding="utf-8") as log_file:
subprocess.Popen(
backup_command,
shell=False,
stdout=log_file,
stderr=subprocess.STDOUT,
)
self.state = ResticStates.BACKING_UP
self.progress = 0
def check_progress(self):
"""
Check progress of ongoing backup operation
"""
backup_status_check_command = ["tail", "-1", "/var/backup.log"]
if self.state in (ResticStates.NO_KEY, ResticStates.NOT_INITIALIZED):
return
# If the log file does not exist
if os.path.exists("/var/backup.log") is False:
self.state = ResticStates.INITIALIZED
with subprocess.Popen(
backup_status_check_command,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as backup_status_check_process_descriptor:
backup_process_status = (
backup_status_check_process_descriptor.communicate()[0].decode("utf-8")
)
try:
status = json.loads(backup_process_status)
except ValueError:
print(backup_process_status)
self.error_message = backup_process_status
return
if status["message_type"] == "status":
self.progress = status["percent_done"]
self.state = ResticStates.BACKING_UP
elif status["message_type"] == "summary":
self.state = ResticStates.INITIALIZED
self.progress = 0
self.snapshot_list.append(
{
"short_id": status["snapshot_id"],
# Current time in format 2021-12-02T00:02:51.086452543+03:00
"time": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
}
)
def restore_from_backup(self, snapshot_id):
"""
Restore from backup with restic
"""
backup_restoration_command = [
"restic",
"-o",
"rclone.args=serve restic --stdio",
"-r",
f"rclone:backblaze:{self._repository_name}/sfbackup",
"restore",
snapshot_id,
"--target",
"/",
]
self.state = ResticStates.RESTORING
subprocess.run(backup_restoration_command, shell=False)
self.state = ResticStates.INITIALIZED


@ -1,70 +0,0 @@
"""Tasks for the restic controller."""
from huey import crontab
from selfprivacy_api.utils.huey import huey
from . import ResticController, ResticStates
@huey.task()
def init_restic():
controller = ResticController()
if controller.state == ResticStates.NOT_INITIALIZED:
initialize_repository()
@huey.task()
def update_keys_from_userdata():
controller = ResticController()
controller.load_configuration()
controller.write_rclone_config()
initialize_repository()
# Check every morning at 5:00 AM
@huey.task(crontab(hour=5, minute=0))
def cron_load_snapshots():
controller = ResticController()
controller.load_snapshots()
# Check every morning at 5:00 AM
@huey.task()
def load_snapshots():
controller = ResticController()
controller.load_snapshots()
if controller.state == ResticStates.NOT_INITIALIZED:
load_snapshots.schedule(delay=120)
@huey.task()
def initialize_repository():
controller = ResticController()
if controller.state is not ResticStates.NO_KEY:
controller.initialize_repository()
load_snapshots()
@huey.task()
def fetch_backup_status():
controller = ResticController()
if controller.state is ResticStates.BACKING_UP:
controller.check_progress()
if controller.state is ResticStates.BACKING_UP:
fetch_backup_status.schedule(delay=2)
else:
load_snapshots.schedule(delay=240)
@huey.task()
def start_backup():
controller = ResticController()
if controller.state is ResticStates.NOT_INITIALIZED:
resp = initialize_repository()
resp.get()
controller.start_backup()
fetch_backup_status.schedule(delay=3)
@huey.task()
def restore_from_backup(snapshot):
controller = ResticController()
controller.restore_from_backup(snapshot)


@ -42,7 +42,7 @@ def get_disabled_services() -> list[Service]:
def get_services_by_location(location: str) -> list[Service]:
return [service for service in services if service.get_location() == location]
return [service for service in services if service.get_drive() == location]
def get_all_required_dns_records() -> list[ServiceDnsRecord]:


@ -3,14 +3,12 @@ import base64
import subprocess
import typing
from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.bitwarden.icon import BITWARDEN_ICON
@ -38,6 +36,10 @@ class Bitwarden(Service):
"""Read SVG icon from file and return it as base64 encoded string."""
return base64.b64encode(BITWARDEN_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_user() -> str:
return "vaultwarden"
@staticmethod
def get_url() -> typing.Optional[str]:
"""Return service url."""
@ -52,6 +54,10 @@ class Bitwarden(Service):
def is_required() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "Password database, encryption certificate and attachments."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
@ -111,19 +117,8 @@ class Bitwarden(Service):
return ""
@staticmethod
def get_storage_usage() -> int:
storage_usage = 0
storage_usage += get_storage_usage("/var/lib/bitwarden")
storage_usage += get_storage_usage("/var/lib/bitwarden_rs")
return storage_usage
@staticmethod
def get_location() -> str:
with ReadUserData() as user_data:
if user_data.get("useBinds", False):
return user_data.get("bitwarden", {}).get("location", "sda1")
else:
return "sda1"
def get_folders() -> typing.List[str]:
return ["/var/lib/bitwarden", "/var/lib/bitwarden_rs"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
@ -154,20 +149,7 @@ class Bitwarden(Service):
self,
volume,
job,
[
FolderMoveNames(
name="bitwarden",
bind_location="/var/lib/bitwarden",
group="vaultwarden",
owner="vaultwarden",
),
FolderMoveNames(
name="bitwarden_rs",
bind_location="/var/lib/bitwarden_rs",
group="vaultwarden",
owner="vaultwarden",
),
],
FolderMoveNames.default_foldermoves(self),
"bitwarden",
)


@ -1,5 +1,6 @@
"""Generic handler for moving services"""
from __future__ import annotations
import subprocess
import time
import pathlib
@ -11,6 +12,7 @@ from selfprivacy_api.utils.huey import huey
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.services.owned_path import OwnedPath
class FolderMoveNames(BaseModel):
@ -19,6 +21,26 @@ class FolderMoveNames(BaseModel):
owner: str
group: str
@staticmethod
def from_owned_path(path: OwnedPath) -> FolderMoveNames:
return FolderMoveNames(
name=FolderMoveNames.get_foldername(path.path),
bind_location=path.path,
owner=path.owner,
group=path.group,
)
@staticmethod
def get_foldername(path: str) -> str:
return path.split("/")[-1]
@staticmethod
def default_foldermoves(service: Service) -> list[FolderMoveNames]:
return [
FolderMoveNames.from_owned_path(folder)
for folder in service.get_owned_folders()
]
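    # A concrete example of the conversion above, using the Bitwarden folder that
    # appears elsewhere in this changeset:
    #
    #   FolderMoveNames.from_owned_path(
    #       OwnedPath(path="/var/lib/bitwarden", owner="vaultwarden", group="vaultwarden")
    #   )
    #   # -> FolderMoveNames(name="bitwarden", bind_location="/var/lib/bitwarden",
    #   #                    owner="vaultwarden", group="vaultwarden")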
@huey.task()
def move_service(
@ -44,7 +66,7 @@ def move_service(
)
return
# Check if we are on the same volume
old_volume = service.get_location()
old_volume = service.get_drive()
if old_volume == volume.name:
Jobs.update(
job=job,
@ -61,7 +83,7 @@ def move_service(
)
return
# Make sure the volume is mounted
if volume.name != "sda1" and f"/volumes/{volume.name}" not in volume.mountpoints:
if not volume.is_root() and f"/volumes/{volume.name}" not in volume.mountpoints:
Jobs.update(
job=job,
status=JobStatus.ERROR,


@ -5,12 +5,10 @@ import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.gitea.icon import GITEA_ICON
@ -52,6 +50,10 @@ class Gitea(Service):
def is_required() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "Git repositories, database and user data."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
@ -110,18 +112,8 @@ class Gitea(Service):
return ""
@staticmethod
def get_storage_usage() -> int:
storage_usage = 0
storage_usage += get_storage_usage("/var/lib/gitea")
return storage_usage
@staticmethod
def get_location() -> str:
with ReadUserData() as user_data:
if user_data.get("useBinds", False):
return user_data.get("gitea", {}).get("location", "sda1")
else:
return "sda1"
def get_folders() -> typing.List[str]:
return ["/var/lib/gitea"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
@ -151,14 +143,7 @@ class Gitea(Service):
self,
volume,
job,
[
FolderMoveNames(
name="gitea",
bind_location="/var/lib/gitea",
group="gitea",
owner="gitea",
),
],
FolderMoveNames.default_foldermoves(self),
"gitea",
)


@ -3,17 +3,13 @@ import base64
import subprocess
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.jobs import Job
from selfprivacy_api.services.generic_status_getter import (
get_service_status,
get_service_status_from_several_units,
)
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.jitsi.icon import JITSI_ICON
@ -55,6 +51,10 @@ class Jitsi(Service):
def is_required() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "Secrets that are used to encrypt the communication."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
@ -84,18 +84,27 @@ class Jitsi(Service):
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "jitsi-videobridge.service"])
subprocess.run(["systemctl", "stop", "jicofo.service"])
subprocess.run(
["systemctl", "stop", "jitsi-videobridge.service"],
check=False,
)
subprocess.run(["systemctl", "stop", "jicofo.service"], check=False)
@staticmethod
def start():
subprocess.run(["systemctl", "start", "jitsi-videobridge.service"])
subprocess.run(["systemctl", "start", "jicofo.service"])
subprocess.run(
["systemctl", "start", "jitsi-videobridge.service"],
check=False,
)
subprocess.run(["systemctl", "start", "jicofo.service"], check=False)
@staticmethod
def restart():
subprocess.run(["systemctl", "restart", "jitsi-videobridge.service"])
subprocess.run(["systemctl", "restart", "jicofo.service"])
subprocess.run(
["systemctl", "restart", "jitsi-videobridge.service"],
check=False,
)
subprocess.run(["systemctl", "restart", "jicofo.service"], check=False)
@staticmethod
def get_configuration():
@ -110,14 +119,8 @@ class Jitsi(Service):
return ""
@staticmethod
def get_storage_usage() -> int:
storage_usage = 0
storage_usage += get_storage_usage("/var/lib/jitsi-meet")
return storage_usage
@staticmethod
def get_location() -> str:
return "sda1"
def get_folders() -> typing.List[str]:
return ["/var/lib/jitsi-meet"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:


@ -4,17 +4,14 @@ import base64
import subprocess
import typing
from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import (
get_service_status,
get_service_status_from_several_units,
)
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
import selfprivacy_api.utils as utils
from selfprivacy_api import utils
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.huey import huey
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.mailserver.icon import MAILSERVER_ICON
@ -24,7 +21,7 @@ class MailServer(Service):
@staticmethod
def get_id() -> str:
return "mailserver"
return "email"
@staticmethod
def get_display_name() -> str:
@ -38,6 +35,10 @@ class MailServer(Service):
def get_svg_icon() -> str:
return base64.b64encode(MAILSERVER_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_user() -> str:
return "virtualMail"
@staticmethod
def get_url() -> typing.Optional[str]:
"""Return service url."""
@ -51,6 +52,10 @@ class MailServer(Service):
def is_required() -> bool:
return True
@staticmethod
def get_backup_description() -> str:
return "Mail boxes and filters."
@staticmethod
def is_enabled() -> bool:
return True
@ -71,18 +76,18 @@ class MailServer(Service):
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "dovecot2.service"])
subprocess.run(["systemctl", "stop", "postfix.service"])
subprocess.run(["systemctl", "stop", "dovecot2.service"], check=False)
subprocess.run(["systemctl", "stop", "postfix.service"], check=False)
@staticmethod
def start():
subprocess.run(["systemctl", "start", "dovecot2.service"])
subprocess.run(["systemctl", "start", "postfix.service"])
subprocess.run(["systemctl", "start", "dovecot2.service"], check=False)
subprocess.run(["systemctl", "start", "postfix.service"], check=False)
@staticmethod
def restart():
subprocess.run(["systemctl", "restart", "dovecot2.service"])
subprocess.run(["systemctl", "restart", "postfix.service"])
subprocess.run(["systemctl", "restart", "dovecot2.service"], check=False)
subprocess.run(["systemctl", "restart", "postfix.service"], check=False)
@staticmethod
def get_configuration():
@ -97,16 +102,8 @@ class MailServer(Service):
return ""
@staticmethod
def get_storage_usage() -> int:
return get_storage_usage("/var/vmail")
@staticmethod
def get_location() -> str:
with utils.ReadUserData() as user_data:
if user_data.get("useBinds", False):
return user_data.get("mailserver", {}).get("location", "sda1")
else:
return "sda1"
def get_folders() -> typing.List[str]:
return ["/var/vmail", "/var/sieve"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
@ -135,7 +132,7 @@ class MailServer(Service):
type="MX", name=domain, content=domain, ttl=3600, priority=10
),
ServiceDnsRecord(
type="TXT", name="_dmarc", content=f"v=DMARC1; p=none", ttl=18000
type="TXT", name="_dmarc", content="v=DMARC1; p=none", ttl=18000
),
ServiceDnsRecord(
type="TXT",
@ -150,7 +147,7 @@ class MailServer(Service):
def move_to_volume(self, volume: BlockDevice) -> Job:
job = Jobs.add(
type_id="services.mailserver.move",
type_id="services.email.move",
name="Move Mail Server",
description=f"Moving mailserver data to {volume.name}",
)
@ -159,21 +156,8 @@ class MailServer(Service):
self,
volume,
job,
[
FolderMoveNames(
name="vmail",
bind_location="/var/vmail",
group="virtualMail",
owner="virtualMail",
),
FolderMoveNames(
name="sieve",
bind_location="/var/sieve",
group="virtualMail",
owner="virtualMail",
),
],
"mailserver",
FolderMoveNames.default_foldermoves(self),
"email",
)
return job


@ -4,7 +4,6 @@ import subprocess
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
@ -50,6 +49,10 @@ class Nextcloud(Service):
def is_required() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "All the files and other data stored in Nextcloud."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
@ -114,22 +117,8 @@ class Nextcloud(Service):
return ""
@staticmethod
def get_storage_usage() -> int:
"""
Calculate the real storage usage of /var/lib/nextcloud and all subdirectories.
Calculate using pathlib.
Do not follow symlinks.
"""
return get_storage_usage("/var/lib/nextcloud")
@staticmethod
def get_location() -> str:
"""Get the name of disk where Nextcloud is installed."""
with ReadUserData() as user_data:
if user_data.get("useBinds", False):
return user_data.get("nextcloud", {}).get("location", "sda1")
else:
return "sda1"
def get_folders() -> typing.List[str]:
return ["/var/lib/nextcloud"]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
@ -158,14 +147,7 @@ class Nextcloud(Service):
self,
volume,
job,
[
FolderMoveNames(
name="nextcloud",
bind_location="/var/lib/nextcloud",
owner="nextcloud",
group="nextcloud",
),
],
FolderMoveNames.default_foldermoves(self),
"nextcloud",
)
return job


@ -2,9 +2,7 @@
import base64
import subprocess
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.jobs import Job
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils import ReadUserData, WriteUserData
@ -45,6 +43,14 @@ class Ocserv(Service):
def is_required() -> bool:
return False
@staticmethod
def can_be_backed_up() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "Nothing to backup."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
@ -70,15 +76,15 @@ class Ocserv(Service):
@staticmethod
def stop():
subprocess.run(["systemctl", "stop", "ocserv.service"])
subprocess.run(["systemctl", "stop", "ocserv.service"], check=False)
@staticmethod
def start():
subprocess.run(["systemctl", "start", "ocserv.service"])
subprocess.run(["systemctl", "start", "ocserv.service"], check=False)
@staticmethod
def restart():
subprocess.run(["systemctl", "restart", "ocserv.service"])
subprocess.run(["systemctl", "restart", "ocserv.service"], check=False)
@staticmethod
def get_configuration():
@ -92,10 +98,6 @@ class Ocserv(Service):
def get_logs():
return ""
@staticmethod
def get_location() -> str:
return "sda1"
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
return [
@ -114,8 +116,8 @@ class Ocserv(Service):
]
@staticmethod
def get_storage_usage() -> int:
return 0
def get_folders() -> typing.List[str]:
return []
def move_to_volume(self, volume: BlockDevice) -> Job:
raise NotImplementedError("ocserv service is not movable")

View File

@ -0,0 +1,7 @@
from pydantic import BaseModel
class OwnedPath(BaseModel):
path: str
owner: str
group: str

View File

@ -4,9 +4,9 @@ import subprocess
import typing
from selfprivacy_api.jobs import Job, Jobs
from selfprivacy_api.services.generic_service_mover import FolderMoveNames, move_service
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.generic_status_getter import get_service_status
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api.utils import ReadUserData, WriteUserData, get_domain
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
@ -46,6 +46,10 @@ class Pleroma(Service):
def is_required() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "Your Pleroma accounts, posts and media."
@staticmethod
def is_enabled() -> bool:
with ReadUserData() as user_data:
@ -97,19 +101,23 @@ class Pleroma(Service):
return ""
@staticmethod
def get_storage_usage() -> int:
storage_usage = 0
storage_usage += get_storage_usage("/var/lib/pleroma")
storage_usage += get_storage_usage("/var/lib/postgresql")
return storage_usage
@staticmethod
def get_location() -> str:
with ReadUserData() as user_data:
if user_data.get("useBinds", False):
return user_data.get("pleroma", {}).get("location", "sda1")
else:
return "sda1"
def get_owned_folders() -> typing.List[OwnedPath]:
"""
Get a list of occupied directories with ownership info.
Pleroma has folders that are owned by different users.
"""
return [
OwnedPath(
path="/var/lib/pleroma",
owner="pleroma",
group="pleroma",
),
OwnedPath(
path="/var/lib/postgresql",
owner="postgres",
group="postgres",
),
]
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
@ -138,20 +146,7 @@ class Pleroma(Service):
self,
volume,
job,
[
FolderMoveNames(
name="pleroma",
bind_location="/var/lib/pleroma",
owner="pleroma",
group="pleroma",
),
FolderMoveNames(
name="postgresql",
bind_location="/var/lib/postgresql",
owner="postgres",
group="postgres",
),
],
FolderMoveNames.default_foldermoves(self),
"pleroma",
)
return job
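The MailServer, Nextcloud and Pleroma hunks above all replace hand-written FolderMoveNames lists with a single FolderMoveNames.default_foldermoves(self) call. The helper itself is outside this diff; a minimal sketch of what it plausibly does, assuming it keeps the name/bind_location/owner/group fields used by the removed literals and leans on the new get_owned_folders() default from service.py (shown further below):
```
# Hypothetical sketch only: the real helper lives in
# selfprivacy_api/services/generic_service_mover.py and is not part of this diff.
import typing

from pydantic import BaseModel


class FolderMoveNames(BaseModel):
    name: str
    bind_location: str
    owner: str
    group: str

    @staticmethod
    def default_foldermoves(service) -> typing.List["FolderMoveNames"]:
        # One move descriptor per owned folder instead of a hard-coded list in
        # every service; ownership comes from the service's get_owned_folders().
        return [
            FolderMoveNames(
                name=folder.path.split("/")[-1],  # same idea as Service.get_foldername()
                bind_location=folder.path,
                owner=folder.owner,
                group=folder.group,
            )
            for folder in service.get_owned_folders()
        ]
```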

View File

@ -6,7 +6,14 @@ import typing
from pydantic import BaseModel
from selfprivacy_api.jobs import Job
from selfprivacy_api.utils.block_devices import BlockDevice
from selfprivacy_api.utils.block_devices import BlockDevice, BlockDevices
from selfprivacy_api.services.generic_size_counter import get_storage_usage
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api import utils
from selfprivacy_api.utils.waitloop import wait_until_true
DEFAULT_START_STOP_TIMEOUT = 5 * 60
class ServiceStatus(Enum):
@ -38,71 +45,125 @@ class Service(ABC):
@staticmethod
@abstractmethod
def get_id() -> str:
"""
The unique id of the service.
"""
pass
@staticmethod
@abstractmethod
def get_display_name() -> str:
"""
The name of the service that is shown to the user.
"""
pass
@staticmethod
@abstractmethod
def get_description() -> str:
"""
The description of the service that is shown to the user.
"""
pass
@staticmethod
@abstractmethod
def get_svg_icon() -> str:
"""
The monochrome svg icon of the service.
"""
pass
@staticmethod
@abstractmethod
def get_url() -> typing.Optional[str]:
"""
The url of the service if it is accessible from the internet browser.
"""
pass
@classmethod
def get_user(cls) -> typing.Optional[str]:
"""
The user that owns the service's files.
Defaults to the service's id.
"""
return cls.get_id()
@classmethod
def get_group(cls) -> typing.Optional[str]:
"""
The group that owns the service's files.
Defaults to the service's user.
"""
return cls.get_user()
@staticmethod
@abstractmethod
def is_movable() -> bool:
"""`True` if the service can be moved to the non-system volume."""
pass
@staticmethod
@abstractmethod
def is_required() -> bool:
"""`True` if the service is required for the server to function."""
pass
@staticmethod
def can_be_backed_up() -> bool:
"""`True` if the service can be backed up."""
return True
@staticmethod
@abstractmethod
def get_backup_description() -> str:
"""
The text shown to the user that explains what data will be
backed up.
"""
pass
@staticmethod
@abstractmethod
def is_enabled() -> bool:
"""`True` if the service is enabled."""
pass
@staticmethod
@abstractmethod
def get_status() -> ServiceStatus:
"""The status of the service, reported by systemd."""
pass
@staticmethod
@abstractmethod
def enable():
"""Enable the service. Usually this means enabling systemd unit."""
pass
@staticmethod
@abstractmethod
def disable():
"""Disable the service. Usually this means disabling systemd unit."""
pass
@staticmethod
@abstractmethod
def stop():
"""Stop the service. Usually this means stopping systemd unit."""
pass
@staticmethod
@abstractmethod
def start():
"""Start the service. Usually this means starting systemd unit."""
pass
@staticmethod
@abstractmethod
def restart():
"""Restart the service. Usually this means restarting systemd unit."""
pass
@staticmethod
@ -120,21 +181,130 @@ class Service(ABC):
def get_logs():
pass
@staticmethod
@abstractmethod
def get_storage_usage() -> int:
pass
@classmethod
def get_storage_usage(cls) -> int:
"""
Calculate the real storage usage of the folders occupied by the service.
Calculate using pathlib.
Do not follow symlinks.
"""
storage_used = 0
for folder in cls.get_folders():
storage_used += get_storage_usage(folder)
return storage_used
@staticmethod
@abstractmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
pass
@classmethod
def get_drive(cls) -> str:
"""
Get the name of the drive/volume where the service is located.
Example values are `sda1`, `vda`, `sdb`.
"""
root_device: str = BlockDevices().get_root_block_device().name
if not cls.is_movable():
return root_device
with utils.ReadUserData() as userdata:
if userdata.get("useBinds", False):
return userdata.get(cls.get_id(), {}).get(
"location",
root_device,
)
else:
return root_device
@classmethod
def get_folders(cls) -> typing.List[str]:
"""
Get a plain list of occupied directories.
By default, extracts info from the overridden get_owned_folders().
"""
if cls.get_owned_folders == Service.get_owned_folders:
raise NotImplementedError(
"you need to implement at least one of get_folders() or get_owned_folders()"
)
return [owned_folder.path for owned_folder in cls.get_owned_folders()]
@classmethod
def get_owned_folders(cls) -> typing.List[OwnedPath]:
"""
Get a list of occupied directories with ownership info.
By default, extracts info from the overridden get_folders().
"""
if cls.get_folders == Service.get_folders:
raise NotImplementedError(
"you need to implement at least one of get_folders() or get_owned_folders()"
)
return [cls.owned_path(path) for path in cls.get_folders()]
@staticmethod
@abstractmethod
def get_location() -> str:
pass
def get_foldername(path: str) -> str:
return path.split("/")[-1]
@abstractmethod
def move_to_volume(self, volume: BlockDevice) -> Job:
pass
@classmethod
def owned_path(cls, path: str):
"""A default guess on folder ownership"""
return OwnedPath(
path=path,
owner=cls.get_user(),
group=cls.get_group(),
)
def pre_backup(self):
pass
def post_restore(self):
pass
class StoppedService:
"""
A context manager that stops the service if needed and reactivates it
after you are done, if it was originally active.
Example:
```
assert service.get_status() == ServiceStatus.ACTIVE
with StoppedService(service) [as stopped_service]:
assert service.get_status() == ServiceStatus.INACTIVE
```
"""
def __init__(self, service: Service):
self.service = service
self.original_status = service.get_status()
def __enter__(self) -> Service:
self.original_status = self.service.get_status()
if self.original_status not in [ServiceStatus.INACTIVE, ServiceStatus.FAILED]:
try:
self.service.stop()
wait_until_true(
lambda: self.service.get_status() == ServiceStatus.INACTIVE,
timeout_sec=DEFAULT_START_STOP_TIMEOUT,
)
except TimeoutError as error:
raise TimeoutError(
f"timed out waiting for {self.service.get_display_name()} to stop"
) from error
return self.service
def __exit__(self, type, value, traceback):
if self.original_status in [ServiceStatus.ACTIVATING, ServiceStatus.ACTIVE]:
try:
self.service.start()
wait_until_true(
lambda: self.service.get_status() == ServiceStatus.ACTIVE,
timeout_sec=DEFAULT_START_STOP_TIMEOUT,
)
except TimeoutError as error:
raise TimeoutError(
f"timed out waiting for {self.service.get_display_name()} to start"
) from error
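With these defaults in place, a movable service only has to declare its folders; ownership info, storage accounting and drive detection come from the base class, and StoppedService wraps maintenance work in a safe stop/restart. A small illustrative sketch (GiteaLike and its path are made up for this example; unrelated abstract methods are omitted, so the class cannot be instantiated, but the classmethod defaults still work on it):
```
import typing

from selfprivacy_api.services.service import Service, StoppedService


class GiteaLike(Service):
    """Made-up example service; only the folder-related hooks are overridden."""

    @staticmethod
    def get_id() -> str:
        return "gitea"

    @classmethod
    def get_folders(cls) -> typing.List[str]:
        return ["/var/lib/gitea"]


# get_owned_folders() falls back to owned_path(), which uses get_user()/get_group(),
# and those in turn default to the service id:
print(GiteaLike.get_owned_folders())
# -> [OwnedPath(path='/var/lib/gitea', owner='gitea', group='gitea')]
# get_storage_usage() would sum the sizes of the declared folders, and get_drive()
# reads userdata to find the volume the service lives on.

# Safe maintenance around any concrete Service instance:
# with StoppedService(service):
#     ...  # service is INACTIVE here; restarted on exit if it was active before
```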

View File

@ -0,0 +1,199 @@
"""Class representing Bitwarden service"""
import base64
import typing
import subprocess
from typing import List
from os import path
# from enum import Enum
from selfprivacy_api.jobs import Job
from selfprivacy_api.services.service import Service, ServiceDnsRecord, ServiceStatus
from selfprivacy_api.utils.block_devices import BlockDevice
import selfprivacy_api.utils.network as network_utils
from selfprivacy_api.services.test_service.icon import BITWARDEN_ICON
DEFAULT_DELAY = 0
class DummyService(Service):
"""A test service"""
folders: List[str] = []
startstop_delay = 0
backuppable = True
def __init_subclass__(cls, folders: List[str]):
cls.folders = folders
def __init__(self):
super().__init__()
status_file = self.status_file()
with open(status_file, "w") as file:
file.write(ServiceStatus.ACTIVE.value)
@staticmethod
def get_id() -> str:
"""Return service id."""
return "testservice"
@staticmethod
def get_display_name() -> str:
"""Return service display name."""
return "Test Service"
@staticmethod
def get_description() -> str:
"""Return service description."""
return "A small service used for test purposes. Does nothing."
@staticmethod
def get_svg_icon() -> str:
"""Read SVG icon from file and return it as base64 encoded string."""
# return ""
return base64.b64encode(BITWARDEN_ICON.encode("utf-8")).decode("utf-8")
@staticmethod
def get_url() -> typing.Optional[str]:
"""Return service url."""
domain = "test.com"
return f"https://password.{domain}"
@staticmethod
def is_movable() -> bool:
return True
@staticmethod
def is_required() -> bool:
return False
@staticmethod
def get_backup_description() -> str:
return "How did we get here?"
@staticmethod
def is_enabled() -> bool:
return True
@classmethod
def status_file(cls) -> str:
dir = cls.folders[0]
# we do not REALLY want to store our state in our declared folders
return path.join(dir, "..", "service_status")
@classmethod
def set_status(cls, status: ServiceStatus):
with open(cls.status_file(), "w") as file:
status_string = file.write(status.value)
@classmethod
def get_status(cls) -> ServiceStatus:
with open(cls.status_file(), "r") as file:
status_string = file.read().strip()
return ServiceStatus[status_string]
@classmethod
def change_status_with_async_delay(
cls, new_status: ServiceStatus, delay_sec: float
):
"""simulating a delay on systemd side"""
status_file = cls.status_file()
command = [
"bash",
"-c",
f" sleep {delay_sec} && echo {new_status.value} > {status_file}",
]
handle = subprocess.Popen(command)
if delay_sec == 0:
handle.communicate()
@classmethod
def set_backuppable(cls, new_value: bool) -> None:
"""For tests: because can_be_backed_up is static,
we can only set it up dynamically for tests via a classmethod"""
cls.backuppable = new_value
@classmethod
def can_be_backed_up(cls) -> bool:
"""`True` if the service can be backed up."""
return cls.backuppable
@classmethod
def enable(cls):
pass
@classmethod
def disable(cls, delay):
pass
@classmethod
def set_delay(cls, new_delay):
cls.startstop_delay = new_delay
@classmethod
def stop(cls):
# simulate a failing service unable to stop
if not cls.get_status() == ServiceStatus.FAILED:
cls.set_status(ServiceStatus.DEACTIVATING)
cls.change_status_with_async_delay(
ServiceStatus.INACTIVE, cls.startstop_delay
)
@classmethod
def start(cls):
cls.set_status(ServiceStatus.ACTIVATING)
cls.change_status_with_async_delay(ServiceStatus.ACTIVE, cls.startstop_delay)
@classmethod
def restart(cls):
cls.set_status(ServiceStatus.RELOADING)  # is this the correct status to simulate a restart?
cls.change_status_with_async_delay(ServiceStatus.ACTIVE, cls.startstop_delay)
@staticmethod
def get_configuration():
return {}
@staticmethod
def set_configuration(config_items):
return super().set_configuration(config_items)
@staticmethod
def get_logs():
return ""
@staticmethod
def get_storage_usage() -> int:
storage_usage = 0
return storage_usage
@staticmethod
def get_drive() -> str:
return "sda1"
@classmethod
def get_folders(cls) -> List[str]:
return cls.folders
@staticmethod
def get_dns_records() -> typing.List[ServiceDnsRecord]:
"""Return list of DNS records for Bitwarden service."""
return [
ServiceDnsRecord(
type="A",
name="password",
content=network_utils.get_ip4(),
ttl=3600,
),
ServiceDnsRecord(
type="AAAA",
name="password",
content=network_utils.get_ip6(),
ttl=3600,
),
]
def move_to_volume(self, volume: BlockDevice) -> Job:
pass
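A sketch of how this dummy is meant to be used: concrete variants are declared through the __init_subclass__ hook with a folders= class keyword, and the class-level knobs simulate slow or non-backuppable units. The import path and the /tmp paths here are assumptions for illustration; the real wiring lives in the test fixtures.
```
import os

from selfprivacy_api.services.service import ServiceStatus
from selfprivacy_api.services.test_service import DummyService  # import path assumed


class SlowDummyService(DummyService, folders=["/tmp/dummy_data"]):
    """Dummy variant whose folder (and the status file next to it) lives under /tmp."""


os.makedirs("/tmp/dummy_data", exist_ok=True)  # the status file is written next to the first folder

service = SlowDummyService()                   # writes ACTIVE into the status file
SlowDummyService.set_delay(2)                  # start/stop now take ~2 seconds, like a slow unit
SlowDummyService.set_backuppable(False)        # simulate a service that refuses backups

service.stop()                                 # DEACTIVATING now, INACTIVE after the delay
assert SlowDummyService.get_status() in (
    ServiceStatus.DEACTIVATING,
    ServiceStatus.INACTIVE,
)
assert not SlowDummyService.can_be_backed_up()
```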

View File

@ -0,0 +1,3 @@
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5.125 2C4.2962 2 3.50134 2.32924 2.91529 2.91529C2.32924 3.50134 2 4.2962 2 5.125L2 18.875C2 19.7038 2.32924 20.4987 2.91529 21.0847C3.50134 21.6708 4.2962 22 5.125 22H18.875C19.7038 22 20.4987 21.6708 21.0847 21.0847C21.6708 20.4987 22 19.7038 22 18.875V5.125C22 4.2962 21.6708 3.50134 21.0847 2.91529C20.4987 2.32924 19.7038 2 18.875 2H5.125ZM6.25833 4.43333H17.7583C17.9317 4.43333 18.0817 4.49667 18.2083 4.62333C18.2688 4.68133 18.3168 4.7511 18.3494 4.82835C18.3819 4.9056 18.3983 4.98869 18.3975 5.0725V12.7392C18.3975 13.3117 18.2858 13.8783 18.0633 14.4408C17.8558 14.9751 17.5769 15.4789 17.2342 15.9383C16.8824 16.3987 16.4882 16.825 16.0567 17.2117C15.6008 17.6242 15.18 17.9667 14.7942 18.24C14.4075 18.5125 14.005 18.77 13.5858 19.0133C13.1667 19.2558 12.8692 19.4208 12.6925 19.5075C12.5158 19.5942 12.375 19.6608 12.2675 19.7075C12.1872 19.7472 12.0987 19.7674 12.0092 19.7667C11.919 19.7674 11.8299 19.7468 11.7492 19.7067C11.6062 19.6429 11.4645 19.5762 11.3242 19.5067C11.0218 19.3511 10.7242 19.1866 10.4317 19.0133C10.0175 18.7738 9.6143 18.5158 9.22333 18.24C8.7825 17.9225 8.36093 17.5791 7.96083 17.2117C7.52907 16.825 7.13456 16.3987 6.7825 15.9383C6.44006 15.4788 6.16141 14.9751 5.95417 14.4408C5.73555 13.9 5.62213 13.3225 5.62 12.7392V5.0725C5.62 4.89917 5.68333 4.75 5.80917 4.6225C5.86726 4.56188 5.93717 4.51382 6.01457 4.48129C6.09196 4.44875 6.17521 4.43243 6.25917 4.43333H6.25833ZM12.0083 6.35V17.7C12.8 17.2817 13.5092 16.825 14.135 16.3333C15.6992 15.1083 16.4808 13.9108 16.4808 12.7392V6.35H12.0083Z" fill="black"/>
</svg>


View File

@ -0,0 +1,5 @@
BITWARDEN_ICON = """
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M5.125 2C4.2962 2 3.50134 2.32924 2.91529 2.91529C2.32924 3.50134 2 4.2962 2 5.125L2 18.875C2 19.7038 2.32924 20.4987 2.91529 21.0847C3.50134 21.6708 4.2962 22 5.125 22H18.875C19.7038 22 20.4987 21.6708 21.0847 21.0847C21.6708 20.4987 22 19.7038 22 18.875V5.125C22 4.2962 21.6708 3.50134 21.0847 2.91529C20.4987 2.32924 19.7038 2 18.875 2H5.125ZM6.25833 4.43333H17.7583C17.9317 4.43333 18.0817 4.49667 18.2083 4.62333C18.2688 4.68133 18.3168 4.7511 18.3494 4.82835C18.3819 4.9056 18.3983 4.98869 18.3975 5.0725V12.7392C18.3975 13.3117 18.2858 13.8783 18.0633 14.4408C17.8558 14.9751 17.5769 15.4789 17.2342 15.9383C16.8824 16.3987 16.4882 16.825 16.0567 17.2117C15.6008 17.6242 15.18 17.9667 14.7942 18.24C14.4075 18.5125 14.005 18.77 13.5858 19.0133C13.1667 19.2558 12.8692 19.4208 12.6925 19.5075C12.5158 19.5942 12.375 19.6608 12.2675 19.7075C12.1872 19.7472 12.0987 19.7674 12.0092 19.7667C11.919 19.7674 11.8299 19.7468 11.7492 19.7067C11.6062 19.6429 11.4645 19.5762 11.3242 19.5067C11.0218 19.3511 10.7242 19.1866 10.4317 19.0133C10.0175 18.7738 9.6143 18.5158 9.22333 18.24C8.7825 17.9225 8.36093 17.5791 7.96083 17.2117C7.52907 16.825 7.13456 16.3987 6.7825 15.9383C6.44006 15.4788 6.16141 14.9751 5.95417 14.4408C5.73555 13.9 5.62213 13.3225 5.62 12.7392V5.0725C5.62 4.89917 5.68333 4.75 5.80917 4.6225C5.86726 4.56188 5.93717 4.51382 6.01457 4.48129C6.09196 4.44875 6.17521 4.43243 6.25917 4.43333H6.25833ZM12.0083 6.35V17.7C12.8 17.2817 13.5092 16.825 14.135 16.3333C15.6992 15.1083 16.4808 13.9108 16.4808 12.7392V6.35H12.0083Z" fill="black"/>
</svg>
"""

View File

@ -1,4 +1,4 @@
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.jobs.test import test_job
from selfprivacy_api.restic_controller.tasks import *
from selfprivacy_api.backup.tasks import *
from selfprivacy_api.services.generic_service_mover import move_service

View File

@ -88,10 +88,12 @@ class ReadUserData(object):
def validate_ssh_public_key(key):
"""Validate SSH public key. It may be ssh-ed25519 or ssh-rsa."""
"""Validate SSH public key.
It may be ssh-ed25519, ssh-rsa or ecdsa-sha2-nistp256."""
if not key.startswith("ssh-ed25519"):
if not key.startswith("ssh-rsa"):
return False
if not key.startswith("ecdsa-sha2-nistp256"):
return False
return True
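As rendered above, the nesting of the updated check is hard to follow; per the new docstring the intent is simply to accept keys of any of the three supported algorithms. An equivalent flat formulation (a sketch, not the code that ships in utils/__init__.py):
```
ACCEPTED_KEY_PREFIXES = ("ssh-ed25519", "ssh-rsa", "ecdsa-sha2-nistp256")


def validate_ssh_public_key(key: str) -> bool:
    """Accept a key if it starts with any supported algorithm prefix."""
    return key.startswith(ACCEPTED_KEY_PREFIXES)  # str.startswith accepts a tuple


assert validate_ssh_public_key("ecdsa-sha2-nistp256 AAAA... user@pc")
assert not validate_ssh_public_key("ssh-dss AAAA... user@pc")
```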

View File

@ -71,6 +71,12 @@ class BlockDevice:
def __hash__(self):
return hash(self.name)
def is_root(self) -> bool:
"""
Return True if the block device is the root device.
"""
return "/" in self.mountpoints
def stats(self) -> typing.Dict[str, typing.Any]:
"""
Update current data and return a dictionary of stats.
@ -175,6 +181,9 @@ class BlockDevices(metaclass=SingletonMetaclass):
# Ignore devices with type "rom"
if device["type"] == "rom":
continue
# Ignore iso9660 devices
if device["fstype"] == "iso9660":
continue
if device["fstype"] is None:
if "children" in device:
for child in device["children"]:
@ -218,3 +227,12 @@ class BlockDevices(metaclass=SingletonMetaclass):
if mountpoint in block_device.mountpoints:
block_devices.append(block_device)
return block_devices
def get_root_block_device(self) -> BlockDevice:
"""
Return the root block device.
"""
for block_device in self.block_devices:
if "/" in block_device.mountpoints:
return block_device
raise RuntimeError("No root block device found")

View File

@ -11,4 +11,5 @@ test_mode = os.environ.get("TEST_MODE")
huey = SqliteHuey(
HUEY_DATABASE,
immediate=test_mode == "true",
utc=True,
)

View File

@ -0,0 +1,33 @@
from datetime import datetime
from typing import Optional
from enum import Enum
def store_model_as_hash(redis, redis_key, model):
for key, value in model.dict().items():
if isinstance(value, datetime):
value = value.isoformat()
if isinstance(value, Enum):
value = value.value
redis.hset(redis_key, key, str(value))
def hash_as_model(redis, redis_key: str, model_class):
token_dict = _model_dict_from_hash(redis, redis_key)
if token_dict is not None:
return model_class(**token_dict)
return None
def _prepare_model_dict(d: dict):
for key in d.keys():
if d[key] == "None":
d[key] = None
def _model_dict_from_hash(redis, redis_key: str) -> Optional[dict]:
if redis.exists(redis_key):
token_dict = redis.hgetall(redis_key)
_prepare_model_dict(token_dict)
return token_dict
return None
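These helpers only handle flat models: datetimes are flattened to ISO strings, None becomes the literal string "None", and hash_as_model relies on pydantic to parse everything back. A round-trip sketch, assuming a Redis connection configured to decode responses and a made-up PingRecord model (the module path of the new file is also an assumption, since the diff does not show it):
```
from datetime import datetime
from typing import Optional

import redis
from pydantic import BaseModel

# module path assumed; the new file above does not show where it lives in the tree
from selfprivacy_api.utils.redis_model_storage import store_model_as_hash, hash_as_model


class PingRecord(BaseModel):
    """Hypothetical flat model used only for this illustration."""

    name: str
    last_seen: datetime
    note: Optional[str] = None


# hgetall() must return str keys/values for model_class(**token_dict) to work,
# hence decode_responses=True
r = redis.Redis(decode_responses=True)

record = PingRecord(name="gitea", last_seen=datetime.now())
store_model_as_hash(r, "ping:gitea", record)   # datetime -> ISO string, None -> "None"

restored = hash_as_model(r, "ping:gitea", PingRecord)
assert restored is not None
assert restored.dict() == record.dict()        # pydantic re-parses the ISO timestamp
```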

View File

@ -1,9 +1,9 @@
"""
Redis pool module for selfprivacy_api
"""
from os import environ
import redis
from selfprivacy_api.utils.singleton_metaclass import SingletonMetaclass
from os import environ
REDIS_SOCKET = "/run/redis-sp-api/redis.sock"
@ -14,7 +14,7 @@ class RedisPool(metaclass=SingletonMetaclass):
"""
def __init__(self):
if "USE_REDIS_PORT" in environ.keys():
if "USE_REDIS_PORT" in environ:
self._pool = redis.ConnectionPool(
host="127.0.0.1",
port=int(environ["USE_REDIS_PORT"]),
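A dev-mode sketch of how this branch is exercised: shell.nix (further below) exports USE_REDIS_PORT=6379 and launches a local redis-server, so the singleton pool connects over TCP instead of the unix socket. The get_connection() accessor is assumed from the rest of the class, which this hunk does not show.
```
import os

os.environ["USE_REDIS_PORT"] = "6379"  # must be set before RedisPool is first instantiated

from selfprivacy_api.utils.redis_pool import RedisPool

pool = RedisPool()                  # singleton; later calls return the same instance
connection = pool.get_connection()  # accessor assumed, not shown in this hunk
connection.ping()
```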

View File

@ -0,0 +1,20 @@
from time import sleep
from typing import Callable
from typing import Optional
def wait_until_true(
readiness_checker: Callable[[], bool],
*,
interval: float = 0.1,
timeout_sec: Optional[float] = None
):
elapsed = 0.0
if timeout_sec is None:
timeout_sec = 10e16
while (not readiness_checker()) and elapsed < timeout_sec:
sleep(interval)
elapsed += interval
if elapsed > timeout_sec:
raise TimeoutError()
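A usage sketch of the polling helper (service.py above imports it from selfprivacy_api.utils.waitloop); here a timer thread stands in for whatever eventually makes the condition true:
```
import threading

from selfprivacy_api.utils.waitloop import wait_until_true

flag = {"ready": False}
threading.Timer(1.0, lambda: flag.update(ready=True)).start()

wait_until_true(lambda: flag["ready"], interval=0.1, timeout_sec=5.0)
print("became ready")  # a TimeoutError is raised instead if the flag never flips in time
```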

View File

@ -2,7 +2,7 @@ from setuptools import setup, find_packages
setup(
name="selfprivacy_api",
version="2.1.2",
version="2.4.2",
packages=find_packages(),
scripts=[
"selfprivacy_api/app.py",

View File

@ -12,6 +12,9 @@ let
mnemonic
coverage
pylint
rope
mypy
pylsp-mypy
pydantic
typing-extensions
psutil
@ -20,6 +23,8 @@ let
uvicorn
redis
strawberry-graphql
flake8-bugbear
flake8
]);
in
pkgs.mkShell {
@ -28,6 +33,7 @@ pkgs.mkShell {
pkgs.black
pkgs.redis
pkgs.restic
pkgs.rclone
];
shellHook = ''
PYTHONPATH=${sp-python}/${sp-python.sitePackages}
@ -35,7 +41,8 @@ pkgs.mkShell {
# for example. printenv <Name> will not fetch the value of an attribute.
export USE_REDIS_PORT=6379
pkill redis-server
redis-server --bind 127.0.0.1 --port $USE_REDIS_PORT >/dev/null &
sleep 2
setsid redis-server --bind 127.0.0.1 --port $USE_REDIS_PORT >/dev/null 2>/dev/null &
# maybe set more env-vars
'';
}

View File

@ -40,6 +40,10 @@ def generate_users_query(query_array):
return "query TestUsers {\n users {" + "\n".join(query_array) + "}\n}"
def generate_backup_query(query_array):
return "query TestBackup {\n backup {" + "\n".join(query_array) + "}\n}"
def mnemonic_to_hex(mnemonic):
return Mnemonic(language="english").to_entropy(mnemonic).hex()

View File

@ -3,6 +3,8 @@
# pylint: disable=unused-argument
import os
import pytest
from os import path
from fastapi.testclient import TestClient
import os.path as path
import datetime
@ -95,6 +97,20 @@ def jobs_file(mocker, shared_datadir):
return mock
@pytest.fixture
def generic_userdata(mocker, tmpdir):
filename = "turned_on.json"
source_path = path.join(global_data_dir(), filename)
userdata_path = path.join(tmpdir, filename)
with open(userdata_path, "w") as file:
with open(source_path, "r") as source:
file.write(source.read())
mock = mocker.patch("selfprivacy_api.utils.USERDATA_FILE", new=userdata_path)
return mock
@pytest.fixture
def huey_database(mocker, shared_datadir):
"""Mock huey database."""

60
tests/data/turned_on.json Normal file
View File

@ -0,0 +1,60 @@
{
"api": {
"token": "TEST_TOKEN",
"enableSwagger": false
},
"bitwarden": {
"enable": true
},
"databasePassword": "PASSWORD",
"domain": "test.tld",
"hashedMasterPassword": "HASHED_PASSWORD",
"hostname": "test-instance",
"nextcloud": {
"adminPassword": "ADMIN",
"databasePassword": "ADMIN",
"enable": true
},
"resticPassword": "PASS",
"ssh": {
"enable": true,
"passwordAuthentication": true,
"rootKeys": [
"ssh-ed25519 KEY test@pc"
]
},
"username": "tester",
"gitea": {
"enable": true
},
"ocserv": {
"enable": true
},
"pleroma": {
"enable": true
},
"jitsi": {
"enable": true
},
"autoUpgrade": {
"enable": true,
"allowReboot": true
},
"timezone": "Europe/Moscow",
"sshKeys": [
"ssh-rsa KEY test@pc"
],
"dns": {
"provider": "CLOUDFLARE",
"apiKey": "TOKEN"
},
"server": {
"provider": "HETZNER"
},
"backup": {
"provider": "BACKBLAZE",
"accountId": "ID",
"accountKey": "KEY",
"bucket": "selfprivacy"
}
}

View File

@ -488,3 +488,21 @@ def test_get_block_devices_by_mountpoint(lsblk_full_mock, authorized_client):
def test_get_block_devices_by_mountpoint_no_match(lsblk_full_mock, authorized_client):
block_devices = BlockDevices().get_block_devices_by_mountpoint("/foo")
assert len(block_devices) == 0
def test_get_root_block_device(lsblk_full_mock, authorized_client):
block_device = BlockDevices().get_root_block_device()
assert block_device is not None
assert block_device.name == "sda1"
assert block_device.path == "/dev/sda1"
assert block_device.fsavail == "4605702144"
assert block_device.fssize == "19814920192"
assert block_device.fstype == "ext4"
assert block_device.fsused == "14353719296"
assert block_device.mountpoints == ["/nix/store", "/"]
assert block_device.label is None
assert block_device.uuid == "ec80c004-baec-4a2c-851d-0e1807135511"
assert block_device.size == "20210236928"
assert block_device.model is None
assert block_device.serial is None
assert block_device.type == "part"

View File

@ -0,0 +1,482 @@
from os import path
from tests.test_graphql.test_backup import dummy_service, backups, raw_dummy_service
from tests.common import generate_backup_query
from selfprivacy_api.graphql.common_types.service import service_to_graphql_service
from selfprivacy_api.graphql.common_types.backup import (
_AutobackupQuotas,
AutobackupQuotas,
)
from selfprivacy_api.jobs import Jobs, JobStatus
API_RELOAD_SNAPSHOTS = """
mutation TestSnapshotsReload {
backup {
forceSnapshotsReload {
success
message
code
}
}
}
"""
API_SET_AUTOBACKUP_PERIOD_MUTATION = """
mutation TestAutobackupPeriod($period: Int) {
backup {
setAutobackupPeriod(period: $period) {
success
message
code
configuration {
provider
encryptionKey
isInitialized
autobackupPeriod
locationName
locationId
}
}
}
}
"""
API_SET_AUTOBACKUP_QUOTAS_MUTATION = """
mutation TestAutobackupQuotas($input: AutobackupQuotasInput!) {
backup {
setAutobackupQuotas(quotas: $input) {
success
message
code
configuration {
provider
encryptionKey
isInitialized
autobackupPeriod
locationName
locationId
autobackupQuotas {
last
daily
weekly
monthly
yearly
}
}
}
}
}
"""
API_REMOVE_REPOSITORY_MUTATION = """
mutation TestRemoveRepo {
backup {
removeRepository {
success
message
code
configuration {
provider
encryptionKey
isInitialized
autobackupPeriod
locationName
locationId
}
}
}
}
"""
API_INIT_MUTATION = """
mutation TestInitRepo($input: InitializeRepositoryInput!) {
backup {
initializeRepository(repository: $input) {
success
message
code
configuration {
provider
encryptionKey
isInitialized
autobackupPeriod
locationName
locationId
}
}
}
}
"""
API_RESTORE_MUTATION = """
mutation TestRestoreService($snapshot_id: String!) {
backup {
restoreBackup(snapshotId: $snapshot_id) {
success
message
code
job {
uid
status
}
}
}
}
"""
API_FORGET_MUTATION = """
mutation TestForgetSnapshot($snapshot_id: String!) {
backup {
forgetSnapshot(snapshotId: $snapshot_id) {
success
message
code
}
}
}
"""
API_SNAPSHOTS_QUERY = """
allSnapshots {
id
service {
id
}
createdAt
reason
}
"""
API_BACK_UP_MUTATION = """
mutation TestBackupService($service_id: String!) {
backup {
startBackup(serviceId: $service_id) {
success
message
code
job {
uid
status
}
}
}
}
"""
def api_restore(authorized_client, snapshot_id):
response = authorized_client.post(
"/graphql",
json={
"query": API_RESTORE_MUTATION,
"variables": {"snapshot_id": snapshot_id},
},
)
return response
def api_backup(authorized_client, service):
response = authorized_client.post(
"/graphql",
json={
"query": API_BACK_UP_MUTATION,
"variables": {"service_id": service.get_id()},
},
)
return response
def api_forget(authorized_client, snapshot_id):
response = authorized_client.post(
"/graphql",
json={
"query": API_FORGET_MUTATION,
"variables": {"snapshot_id": snapshot_id},
},
)
return response
def api_set_period(authorized_client, period):
response = authorized_client.post(
"/graphql",
json={
"query": API_SET_AUTOBACKUP_PERIOD_MUTATION,
"variables": {"period": period},
},
)
return response
def api_set_quotas(authorized_client, quotas: _AutobackupQuotas):
response = authorized_client.post(
"/graphql",
json={
"query": API_SET_AUTOBACKUP_QUOTAS_MUTATION,
"variables": {"input": quotas.dict()},
},
)
return response
def api_remove(authorized_client):
response = authorized_client.post(
"/graphql",
json={
"query": API_REMOVE_REPOSITORY_MUTATION,
"variables": {},
},
)
return response
def api_reload_snapshots(authorized_client):
response = authorized_client.post(
"/graphql",
json={
"query": API_RELOAD_SNAPSHOTS,
"variables": {},
},
)
return response
def api_init_without_key(
authorized_client, kind, login, password, location_name, location_id
):
response = authorized_client.post(
"/graphql",
json={
"query": API_INIT_MUTATION,
"variables": {
"input": {
"provider": kind,
"locationId": location_id,
"locationName": location_name,
"login": login,
"password": password,
}
},
},
)
return response
def assert_ok(data):
assert data["code"] == 200
assert data["success"] is True
def get_data(response):
assert response.status_code == 200
response = response.json()
if (
"errors" in response.keys()
):  # convenience for debugging, this will display the error
assert response["errors"] == []
assert response["data"] is not None
data = response["data"]
return data
def api_snapshots(authorized_client):
response = authorized_client.post(
"/graphql",
json={"query": generate_backup_query([API_SNAPSHOTS_QUERY])},
)
data = get_data(response)
result = data["backup"]["allSnapshots"]
assert result is not None
return result
def test_dummy_service_convertible_to_gql(dummy_service):
gql_service = service_to_graphql_service(dummy_service)
assert gql_service is not None
def test_snapshots_empty(authorized_client, dummy_service):
snaps = api_snapshots(authorized_client)
assert snaps == []
def test_start_backup(authorized_client, dummy_service):
response = api_backup(authorized_client, dummy_service)
data = get_data(response)["backup"]["startBackup"]
assert data["success"] is True
job = data["job"]
assert Jobs.get_job(job["uid"]).status == JobStatus.FINISHED
snaps = api_snapshots(authorized_client)
assert len(snaps) == 1
snap = snaps[0]
assert snap["id"] is not None
assert snap["id"] != ""
assert snap["service"]["id"] == "testservice"
def test_restore(authorized_client, dummy_service):
api_backup(authorized_client, dummy_service)
snap = api_snapshots(authorized_client)[0]
assert snap["id"] is not None
response = api_restore(authorized_client, snap["id"])
data = get_data(response)["backup"]["restoreBackup"]
assert data["success"] is True
job = data["job"]
assert Jobs.get_job(job["uid"]).status == JobStatus.FINISHED
def test_reinit(authorized_client, dummy_service, tmpdir):
test_repo_path = path.join(tmpdir, "not_at_all_sus")
response = api_init_without_key(
authorized_client, "FILE", "", "", test_repo_path, ""
)
data = get_data(response)["backup"]["initializeRepository"]
assert_ok(data)
configuration = data["configuration"]
assert configuration["provider"] == "FILE"
assert configuration["locationId"] == ""
assert configuration["locationName"] == test_repo_path
assert len(configuration["encryptionKey"]) > 1
assert configuration["isInitialized"] is True
response = api_backup(authorized_client, dummy_service)
data = get_data(response)["backup"]["startBackup"]
assert data["success"] is True
job = data["job"]
assert Jobs.get_job(job["uid"]).status == JobStatus.FINISHED
def test_remove(authorized_client, generic_userdata):
response = api_remove(authorized_client)
data = get_data(response)["backup"]["removeRepository"]
assert_ok(data)
configuration = data["configuration"]
assert configuration["provider"] == "NONE"
assert configuration["locationId"] == ""
assert configuration["locationName"] == ""
# still generated every time it is missing
assert len(configuration["encryptionKey"]) > 1
assert configuration["isInitialized"] is False
def test_autobackup_quotas_nonzero(authorized_client):
quotas = _AutobackupQuotas(
last=3,
daily=2,
weekly=4,
monthly=13,
yearly=14,
)
response = api_set_quotas(authorized_client, quotas)
data = get_data(response)["backup"]["setAutobackupQuotas"]
assert_ok(data)
configuration = data["configuration"]
assert configuration["autobackupQuotas"] == quotas
def test_autobackup_period_nonzero(authorized_client):
new_period = 11
response = api_set_period(authorized_client, new_period)
data = get_data(response)["backup"]["setAutobackupPeriod"]
assert_ok(data)
configuration = data["configuration"]
assert configuration["autobackupPeriod"] == new_period
def test_autobackup_period_zero(authorized_client):
new_period = 0
# since it is none by default, we better first set it to something non-negative
response = api_set_period(authorized_client, 11)
# and now we nullify it
response = api_set_period(authorized_client, new_period)
data = get_data(response)["backup"]["setAutobackupPeriod"]
assert_ok(data)
configuration = data["configuration"]
assert configuration["autobackupPeriod"] == None
def test_autobackup_period_none(authorized_client):
# since it is none by default, we better first set it to something non-negative
response = api_set_period(authorized_client, 11)
# and now we nullify it
response = api_set_period(authorized_client, None)
data = get_data(response)["backup"]["setAutobackupPeriod"]
assert_ok(data)
configuration = data["configuration"]
assert configuration["autobackupPeriod"] == None
def test_autobackup_period_negative(authorized_client):
# since it is none by default, we better first set it to something non-negative
response = api_set_period(authorized_client, 11)
# and now we send a negative period, which should unset it
response = api_set_period(authorized_client, -12)
data = get_data(response)["backup"]["setAutobackupPeriod"]
assert_ok(data)
configuration = data["configuration"]
assert configuration["autobackupPeriod"] == None
# We cannot really check the effect at this level; we leave that to backend tests
# But we still make it run in both empty and full scenarios and ask for snaps afterwards
def test_reload_snapshots_bare_bare_bare(authorized_client, dummy_service):
api_remove(authorized_client)
response = api_reload_snapshots(authorized_client)
data = get_data(response)["backup"]["forceSnapshotsReload"]
assert_ok(data)
snaps = api_snapshots(authorized_client)
assert snaps == []
def test_reload_snapshots(authorized_client, dummy_service):
response = api_backup(authorized_client, dummy_service)
data = get_data(response)["backup"]["startBackup"]
response = api_reload_snapshots(authorized_client)
data = get_data(response)["backup"]["forceSnapshotsReload"]
assert_ok(data)
snaps = api_snapshots(authorized_client)
assert len(snaps) == 1
def test_forget_snapshot(authorized_client, dummy_service):
response = api_backup(authorized_client, dummy_service)
data = get_data(response)["backup"]["startBackup"]
snaps = api_snapshots(authorized_client)
assert len(snaps) == 1
response = api_forget(authorized_client, snaps[0]["id"])
data = get_data(response)["backup"]["forgetSnapshot"]
assert_ok(data)
snaps = api_snapshots(authorized_client)
assert len(snaps) == 0
def test_forget_nonexistent_snapshot(authorized_client, dummy_service):
snaps = api_snapshots(authorized_client)
assert len(snaps) == 0
response = api_forget(authorized_client, "898798uekiodpjoiweoiwuoeirueor")
data = get_data(response)["backup"]["forgetSnapshot"]
assert data["code"] == 404
assert data["success"] is False
snaps = api_snapshots(authorized_client)
assert len(snaps) == 0

View File

@ -76,10 +76,12 @@ def test_graphql_tokens_info_unauthorized(client, tokens_file):
DELETE_TOKEN_MUTATION = """
mutation DeleteToken($device: String!) {
deleteDeviceApiToken(device: $device) {
success
message
code
api {
deleteDeviceApiToken(device: $device) {
success
message
code
}
}
}
"""
@ -132,7 +134,10 @@ def test_graphql_delete_self_token(authorized_client, tokens_file):
assert_original(authorized_client)
def test_graphql_delete_nonexistent_token(authorized_client, tokens_file):
def test_graphql_delete_nonexistent_token(
authorized_client,
tokens_file,
):
response = authorized_client.post(
"/graphql",
json={
@ -149,11 +154,13 @@ def test_graphql_delete_nonexistent_token(authorized_client, tokens_file):
REFRESH_TOKEN_MUTATION = """
mutation RefreshToken {
refreshDeviceApiToken {
success
message
code
token
api {
refreshDeviceApiToken {
success
message
code
token
}
}
}
"""
@ -184,17 +191,22 @@ def test_graphql_refresh_token(authorized_client, client, tokens_file):
NEW_DEVICE_KEY_MUTATION = """
mutation NewDeviceKey {
getNewDeviceApiKey {
success
message
code
key
api {
getNewDeviceApiKey {
success
message
code
key
}
}
}
"""
def test_graphql_get_new_device_auth_key_unauthorized(client, tokens_file):
def test_graphql_get_new_device_auth_key_unauthorized(
client,
tokens_file,
):
response = client.post(
"/graphql",
json={"query": NEW_DEVICE_KEY_MUTATION},
@ -204,20 +216,25 @@ def test_graphql_get_new_device_auth_key_unauthorized(client, tokens_file):
INVALIDATE_NEW_DEVICE_KEY_MUTATION = """
mutation InvalidateNewDeviceKey {
invalidateNewDeviceApiKey {
success
message
code
api {
invalidateNewDeviceApiKey {
success
message
code
}
}
}
"""
def test_graphql_invalidate_new_device_token_unauthorized(client, tokens_file):
def test_graphql_invalidate_new_device_token_unauthorized(
client,
tokens_file,
):
response = client.post(
"/graphql",
json={
"query": DELETE_TOKEN_MUTATION,
"query": INVALIDATE_NEW_DEVICE_KEY_MUTATION,
"variables": {
"device": "test_token",
},
@ -241,11 +258,13 @@ def test_graphql_get_and_delete_new_device_key(client, authorized_client, tokens
AUTHORIZE_WITH_NEW_DEVICE_KEY_MUTATION = """
mutation AuthorizeWithNewDeviceKey($input: UseNewDeviceKeyInput!) {
authorizeWithNewDeviceApiKey(input: $input) {
success
message
code
token
api {
authorizeWithNewDeviceApiKey(input: $input) {
success
message
code
token
}
}
}
"""
@ -293,7 +312,10 @@ def test_graphql_get_and_authorize_key_after_12_minutes(
assert_errorcode(response, "authorizeWithNewDeviceApiKey", 404)
def test_graphql_authorize_without_token(client, tokens_file):
def test_graphql_authorize_without_token(
client,
tokens_file,
):
response = client.post(
"/graphql",
json={

View File

@ -118,22 +118,26 @@ def test_graphql_recovery_key_status_when_none_exists(authorized_client, tokens_
API_RECOVERY_KEY_GENERATE_MUTATION = """
mutation TestGenerateRecoveryKey($limits: RecoveryKeyLimitsInput) {
getNewRecoveryApiKey(limits: $limits) {
success
message
code
key
api {
getNewRecoveryApiKey(limits: $limits) {
success
message
code
key
}
}
}
"""
API_RECOVERY_KEY_USE_MUTATION = """
mutation TestUseRecoveryKey($input: UseRecoveryKeyInput!) {
useRecoveryApiKey(input: $input) {
success
message
code
token
api {
useRecoveryApiKey(input: $input) {
success
message
code
token
}
}
}
"""

File diff suppressed because it is too large

View File

@ -0,0 +1,38 @@
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from pytest import fixture
@fixture()
def localsecret():
LocalBackupSecret._full_reset()
return LocalBackupSecret
def test_local_secret_firstget(localsecret):
assert not LocalBackupSecret.exists()
secret = LocalBackupSecret.get()
assert LocalBackupSecret.exists()
assert secret is not None
# making sure it does not reset again
secret2 = LocalBackupSecret.get()
assert LocalBackupSecret.exists()
assert secret2 == secret
def test_local_secret_reset(localsecret):
secret1 = LocalBackupSecret.get()
LocalBackupSecret.reset()
secret2 = LocalBackupSecret.get()
assert secret2 is not None
assert secret2 != secret1
def test_local_secret_set(localsecret):
newsecret = "great and totally safe secret"
oldsecret = LocalBackupSecret.get()
assert oldsecret != newsecret
LocalBackupSecret.set(newsecret)
assert LocalBackupSecret.get() == newsecret

View File

@ -45,13 +45,15 @@ def some_users(mocker, datadir):
API_CREATE_SSH_KEY_MUTATION = """
mutation addSshKey($sshInput: SshMutationInput!) {
addSshKey(sshInput: $sshInput) {
success
message
code
user {
username
sshKeys
users {
addSshKey(sshInput: $sshInput) {
success
message
code
user {
username
sshKeys
}
}
}
}
@ -90,12 +92,12 @@ def test_graphql_add_ssh_key(authorized_client, some_users, mock_subprocess_pope
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["addSshKey"]["code"] == 201
assert response.json()["data"]["addSshKey"]["message"] is not None
assert response.json()["data"]["addSshKey"]["success"] is True
assert response.json()["data"]["users"]["addSshKey"]["code"] == 201
assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
assert response.json()["data"]["users"]["addSshKey"]["success"] is True
assert response.json()["data"]["addSshKey"]["user"]["username"] == "user1"
assert response.json()["data"]["addSshKey"]["user"]["sshKeys"] == [
assert response.json()["data"]["users"]["addSshKey"]["user"]["username"] == "user1"
assert response.json()["data"]["users"]["addSshKey"]["user"]["sshKeys"] == [
"ssh-rsa KEY user1@pc",
"ssh-rsa KEY test_key@pc",
]
@ -117,12 +119,12 @@ def test_graphql_add_root_ssh_key(authorized_client, some_users, mock_subprocess
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["addSshKey"]["code"] == 201
assert response.json()["data"]["addSshKey"]["message"] is not None
assert response.json()["data"]["addSshKey"]["success"] is True
assert response.json()["data"]["users"]["addSshKey"]["code"] == 201
assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
assert response.json()["data"]["users"]["addSshKey"]["success"] is True
assert response.json()["data"]["addSshKey"]["user"]["username"] == "root"
assert response.json()["data"]["addSshKey"]["user"]["sshKeys"] == [
assert response.json()["data"]["users"]["addSshKey"]["user"]["username"] == "root"
assert response.json()["data"]["users"]["addSshKey"]["user"]["sshKeys"] == [
"ssh-ed25519 KEY test@pc",
"ssh-rsa KEY test_key@pc",
]
@ -144,12 +146,12 @@ def test_graphql_add_main_ssh_key(authorized_client, some_users, mock_subprocess
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["addSshKey"]["code"] == 201
assert response.json()["data"]["addSshKey"]["message"] is not None
assert response.json()["data"]["addSshKey"]["success"] is True
assert response.json()["data"]["users"]["addSshKey"]["code"] == 201
assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
assert response.json()["data"]["users"]["addSshKey"]["success"] is True
assert response.json()["data"]["addSshKey"]["user"]["username"] == "tester"
assert response.json()["data"]["addSshKey"]["user"]["sshKeys"] == [
assert response.json()["data"]["users"]["addSshKey"]["user"]["username"] == "tester"
assert response.json()["data"]["users"]["addSshKey"]["user"]["sshKeys"] == [
"ssh-rsa KEY test@pc",
"ssh-rsa KEY test_key@pc",
]
@ -171,9 +173,9 @@ def test_graphql_add_bad_ssh_key(authorized_client, some_users, mock_subprocess_
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["addSshKey"]["code"] == 400
assert response.json()["data"]["addSshKey"]["message"] is not None
assert response.json()["data"]["addSshKey"]["success"] is False
assert response.json()["data"]["users"]["addSshKey"]["code"] == 400
assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
assert response.json()["data"]["users"]["addSshKey"]["success"] is False
def test_graphql_add_ssh_key_nonexistent_user(
@ -194,20 +196,22 @@ def test_graphql_add_ssh_key_nonexistent_user(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["addSshKey"]["code"] == 404
assert response.json()["data"]["addSshKey"]["message"] is not None
assert response.json()["data"]["addSshKey"]["success"] is False
assert response.json()["data"]["users"]["addSshKey"]["code"] == 404
assert response.json()["data"]["users"]["addSshKey"]["message"] is not None
assert response.json()["data"]["users"]["addSshKey"]["success"] is False
API_REMOVE_SSH_KEY_MUTATION = """
mutation removeSshKey($sshInput: SshMutationInput!) {
removeSshKey(sshInput: $sshInput) {
success
message
code
user {
username
sshKeys
users {
removeSshKey(sshInput: $sshInput) {
success
message
code
user {
username
sshKeys
}
}
}
}
@ -246,12 +250,14 @@ def test_graphql_remove_ssh_key(authorized_client, some_users, mock_subprocess_p
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["removeSshKey"]["code"] == 200
assert response.json()["data"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["removeSshKey"]["success"] is True
assert response.json()["data"]["users"]["removeSshKey"]["code"] == 200
assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["users"]["removeSshKey"]["success"] is True
assert response.json()["data"]["removeSshKey"]["user"]["username"] == "user1"
assert response.json()["data"]["removeSshKey"]["user"]["sshKeys"] == []
assert (
response.json()["data"]["users"]["removeSshKey"]["user"]["username"] == "user1"
)
assert response.json()["data"]["users"]["removeSshKey"]["user"]["sshKeys"] == []
def test_graphql_remove_root_ssh_key(
@ -272,12 +278,14 @@ def test_graphql_remove_root_ssh_key(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["removeSshKey"]["code"] == 200
assert response.json()["data"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["removeSshKey"]["success"] is True
assert response.json()["data"]["users"]["removeSshKey"]["code"] == 200
assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["users"]["removeSshKey"]["success"] is True
assert response.json()["data"]["removeSshKey"]["user"]["username"] == "root"
assert response.json()["data"]["removeSshKey"]["user"]["sshKeys"] == []
assert (
response.json()["data"]["users"]["removeSshKey"]["user"]["username"] == "root"
)
assert response.json()["data"]["users"]["removeSshKey"]["user"]["sshKeys"] == []
def test_graphql_remove_main_ssh_key(
@ -298,12 +306,14 @@ def test_graphql_remove_main_ssh_key(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["removeSshKey"]["code"] == 200
assert response.json()["data"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["removeSshKey"]["success"] is True
assert response.json()["data"]["users"]["removeSshKey"]["code"] == 200
assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["users"]["removeSshKey"]["success"] is True
assert response.json()["data"]["removeSshKey"]["user"]["username"] == "tester"
assert response.json()["data"]["removeSshKey"]["user"]["sshKeys"] == []
assert (
response.json()["data"]["users"]["removeSshKey"]["user"]["username"] == "tester"
)
assert response.json()["data"]["users"]["removeSshKey"]["user"]["sshKeys"] == []
def test_graphql_remove_nonexistent_ssh_key(
@ -324,9 +334,9 @@ def test_graphql_remove_nonexistent_ssh_key(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["removeSshKey"]["code"] == 404
assert response.json()["data"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["removeSshKey"]["success"] is False
assert response.json()["data"]["users"]["removeSshKey"]["code"] == 404
assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["users"]["removeSshKey"]["success"] is False
def test_graphql_remove_ssh_key_nonexistent_user(
@ -347,6 +357,6 @@ def test_graphql_remove_ssh_key_nonexistent_user(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["removeSshKey"]["code"] == 404
assert response.json()["data"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["removeSshKey"]["success"] is False
assert response.json()["data"]["users"]["removeSshKey"]["code"] == 404
assert response.json()["data"]["users"]["removeSshKey"]["message"] is not None
assert response.json()["data"]["users"]["removeSshKey"]["success"] is False

View File

@ -380,11 +380,13 @@ def test_graphql_get_timezone_on_undefined(authorized_client, undefined_config):
API_CHANGE_TIMEZONE_MUTATION = """
mutation changeTimezone($timezone: String!) {
changeTimezone(timezone: $timezone) {
success
message
code
timezone
system {
changeTimezone(timezone: $timezone) {
success
message
code
timezone
}
}
}
"""
@ -417,10 +419,13 @@ def test_graphql_change_timezone(authorized_client, turned_on):
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeTimezone"]["success"] is True
assert response.json()["data"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["changeTimezone"]["code"] == 200
assert response.json()["data"]["changeTimezone"]["timezone"] == "Europe/Helsinki"
assert response.json()["data"]["system"]["changeTimezone"]["success"] is True
assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["system"]["changeTimezone"]["code"] == 200
assert (
response.json()["data"]["system"]["changeTimezone"]["timezone"]
== "Europe/Helsinki"
)
assert read_json(turned_on / "turned_on.json")["timezone"] == "Europe/Helsinki"
@ -437,10 +442,13 @@ def test_graphql_change_timezone_on_undefined(authorized_client, undefined_confi
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeTimezone"]["success"] is True
assert response.json()["data"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["changeTimezone"]["code"] == 200
assert response.json()["data"]["changeTimezone"]["timezone"] == "Europe/Helsinki"
assert response.json()["data"]["system"]["changeTimezone"]["success"] is True
assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["system"]["changeTimezone"]["code"] == 200
assert (
response.json()["data"]["system"]["changeTimezone"]["timezone"]
== "Europe/Helsinki"
)
assert (
read_json(undefined_config / "undefined.json")["timezone"] == "Europe/Helsinki"
)
@ -459,10 +467,10 @@ def test_graphql_change_timezone_without_timezone(authorized_client, turned_on):
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeTimezone"]["success"] is False
assert response.json()["data"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["changeTimezone"]["code"] == 400
assert response.json()["data"]["changeTimezone"]["timezone"] is None
assert response.json()["data"]["system"]["changeTimezone"]["success"] is False
assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["system"]["changeTimezone"]["code"] == 400
assert response.json()["data"]["system"]["changeTimezone"]["timezone"] is None
assert read_json(turned_on / "turned_on.json")["timezone"] == "Europe/Moscow"
@ -479,10 +487,10 @@ def test_graphql_change_timezone_with_invalid_timezone(authorized_client, turned
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeTimezone"]["success"] is False
assert response.json()["data"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["changeTimezone"]["code"] == 400
assert response.json()["data"]["changeTimezone"]["timezone"] is None
assert response.json()["data"]["system"]["changeTimezone"]["success"] is False
assert response.json()["data"]["system"]["changeTimezone"]["message"] is not None
assert response.json()["data"]["system"]["changeTimezone"]["code"] == 400
assert response.json()["data"]["system"]["changeTimezone"]["timezone"] is None
assert read_json(turned_on / "turned_on.json")["timezone"] == "Europe/Moscow"
@ -585,12 +593,14 @@ def test_graphql_get_auto_upgrade_turned_off(authorized_client, turned_off):
API_CHANGE_AUTO_UPGRADE_SETTINGS = """
mutation changeServerSettings($settings: AutoUpgradeSettingsInput!) {
changeAutoUpgradeSettings(settings: $settings) {
success
message
code
enableAutoUpgrade
allowReboot
system {
changeAutoUpgradeSettings(settings: $settings) {
success
message
code
enableAutoUpgrade
allowReboot
}
}
}
"""
@ -629,14 +639,25 @@ def test_graphql_change_auto_upgrade(authorized_client, turned_on):
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
is not None
)
assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"][
"enableAutoUpgrade"
]
is False
)
assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
is True
)
assert read_json(turned_on / "turned_on.json")["autoUpgrade"]["enable"] is False
assert read_json(turned_on / "turned_on.json")["autoUpgrade"]["allowReboot"] is True
@ -657,14 +678,25 @@ def test_graphql_change_auto_upgrade_on_undefined(authorized_client, undefined_c
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
is not None
)
assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"][
"enableAutoUpgrade"
]
is False
)
assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
is True
)
assert (
read_json(undefined_config / "undefined.json")["autoUpgrade"]["enable"] is False
)
@ -690,14 +722,25 @@ def test_graphql_change_auto_upgrade_without_vlaues(authorized_client, no_values
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
is not None
)
assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"][
"enableAutoUpgrade"
]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
is True
)
assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
assert read_json(no_values / "no_values.json")["autoUpgrade"]["enable"] is True
assert read_json(no_values / "no_values.json")["autoUpgrade"]["allowReboot"] is True
@ -718,14 +761,25 @@ def test_graphql_change_auto_upgrade_turned_off(authorized_client, turned_off):
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
is not None
)
assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"][
"enableAutoUpgrade"
]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
is True
)
assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is True
assert (
read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is True
@ -747,14 +801,25 @@ def test_grphql_change_auto_upgrade_without_enable(authorized_client, turned_off
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
is not None
)
assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"][
"enableAutoUpgrade"
]
is False
)
assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is True
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
is True
)
assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is False
assert (
read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is True
@ -778,14 +843,25 @@ def test_graphql_change_auto_upgrade_without_allow_reboot(
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
is True
)
assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is False
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
is not None
)
assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"][
"enableAutoUpgrade"
]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
is False
)
assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is True
assert (
read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is False
@ -805,14 +881,25 @@ def test_graphql_change_auto_upgrade_with_empty_input(authorized_client, turned_
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["success"] is True
assert response.json()["data"]["changeAutoUpgradeSettings"]["message"] is not None
assert response.json()["data"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["changeAutoUpgradeSettings"]["enableAutoUpgrade"]
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["success"]
is True
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["message"]
is not None
)
assert response.json()["data"]["system"]["changeAutoUpgradeSettings"]["code"] == 200
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"][
"enableAutoUpgrade"
]
is False
)
assert (
response.json()["data"]["system"]["changeAutoUpgradeSettings"]["allowReboot"]
is False
)
assert response.json()["data"]["changeAutoUpgradeSettings"]["allowReboot"] is False
assert read_json(turned_off / "turned_off.json")["autoUpgrade"]["enable"] is False
assert (
read_json(turned_off / "turned_off.json")["autoUpgrade"]["allowReboot"] is False
@ -821,10 +908,12 @@ def test_graphql_change_auto_upgrade_with_empty_input(authorized_client, turned_
API_PULL_SYSTEM_CONFIGURATION_MUTATION = """
mutation testPullSystemConfiguration {
pullRepositoryChanges {
success
message
code
system {
pullRepositoryChanges {
success
message
code
}
}
}
"""
@ -855,9 +944,12 @@ def test_graphql_pull_system_configuration(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["pullRepositoryChanges"]["success"] is True
assert response.json()["data"]["pullRepositoryChanges"]["message"] is not None
assert response.json()["data"]["pullRepositoryChanges"]["code"] == 200
assert response.json()["data"]["system"]["pullRepositoryChanges"]["success"] is True
assert (
response.json()["data"]["system"]["pullRepositoryChanges"]["message"]
is not None
)
assert response.json()["data"]["system"]["pullRepositoryChanges"]["code"] == 200
assert mock_subprocess_popen.call_count == 1
assert mock_subprocess_popen.call_args[0][0] == ["git", "pull"]
@ -880,9 +972,14 @@ def test_graphql_pull_system_broken_repo(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["pullRepositoryChanges"]["success"] is False
assert response.json()["data"]["pullRepositoryChanges"]["message"] is not None
assert response.json()["data"]["pullRepositoryChanges"]["code"] == 500
assert (
response.json()["data"]["system"]["pullRepositoryChanges"]["success"] is False
)
assert (
response.json()["data"]["system"]["pullRepositoryChanges"]["message"]
is not None
)
assert response.json()["data"]["system"]["pullRepositoryChanges"]["code"] == 500
assert mock_broken_service.call_count == 1
assert mock_os_chdir.call_count == 2

View File

@ -54,10 +54,12 @@ def mock_subprocess_check_output(mocker):
API_REBUILD_SYSTEM_MUTATION = """
mutation rebuildSystem {
runSystemRebuild {
success
message
code
system {
runSystemRebuild {
success
message
code
}
}
}
"""
@ -86,9 +88,9 @@ def test_graphql_system_rebuild(authorized_client, mock_subprocess_popen):
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["runSystemRebuild"]["success"] is True
assert response.json()["data"]["runSystemRebuild"]["message"] is not None
assert response.json()["data"]["runSystemRebuild"]["code"] == 200
assert response.json()["data"]["system"]["runSystemRebuild"]["success"] is True
assert response.json()["data"]["system"]["runSystemRebuild"]["message"] is not None
assert response.json()["data"]["system"]["runSystemRebuild"]["code"] == 200
assert mock_subprocess_popen.call_count == 1
assert mock_subprocess_popen.call_args[0][0] == [
"systemctl",
@ -99,10 +101,12 @@ def test_graphql_system_rebuild(authorized_client, mock_subprocess_popen):
API_UPGRADE_SYSTEM_MUTATION = """
mutation upgradeSystem {
runSystemUpgrade {
success
message
code
system {
runSystemUpgrade {
success
message
code
}
}
}
"""
@ -131,9 +135,9 @@ def test_graphql_system_upgrade(authorized_client, mock_subprocess_popen):
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["runSystemUpgrade"]["success"] is True
assert response.json()["data"]["runSystemUpgrade"]["message"] is not None
assert response.json()["data"]["runSystemUpgrade"]["code"] == 200
assert response.json()["data"]["system"]["runSystemUpgrade"]["success"] is True
assert response.json()["data"]["system"]["runSystemUpgrade"]["message"] is not None
assert response.json()["data"]["system"]["runSystemUpgrade"]["code"] == 200
assert mock_subprocess_popen.call_count == 1
assert mock_subprocess_popen.call_args[0][0] == [
"systemctl",
@ -144,10 +148,12 @@ def test_graphql_system_upgrade(authorized_client, mock_subprocess_popen):
API_ROLLBACK_SYSTEM_MUTATION = """
mutation rollbackSystem {
runSystemRollback {
success
message
code
system {
runSystemRollback {
success
message
code
}
}
}
"""
@ -176,9 +182,9 @@ def test_graphql_system_rollback(authorized_client, mock_subprocess_popen):
)
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["runSystemRollback"]["success"] is True
assert response.json()["data"]["runSystemRollback"]["message"] is not None
assert response.json()["data"]["runSystemRollback"]["code"] == 200
assert response.json()["data"]["system"]["runSystemRollback"]["success"] is True
assert response.json()["data"]["system"]["runSystemRollback"]["message"] is not None
assert response.json()["data"]["system"]["runSystemRollback"]["code"] == 200
assert mock_subprocess_popen.call_count == 1
assert mock_subprocess_popen.call_args[0][0] == [
"systemctl",
@ -189,10 +195,12 @@ def test_graphql_system_rollback(authorized_client, mock_subprocess_popen):
API_REBOOT_SYSTEM_MUTATION = """
mutation system {
rebootSystem {
success
message
code
system {
rebootSystem {
success
message
code
}
}
}
"""
@ -223,9 +231,9 @@ def test_graphql_reboot_system(authorized_client, mock_subprocess_popen):
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["rebootSystem"]["success"] is True
assert response.json()["data"]["rebootSystem"]["message"] is not None
assert response.json()["data"]["rebootSystem"]["code"] == 200
assert response.json()["data"]["system"]["rebootSystem"]["success"] is True
assert response.json()["data"]["system"]["rebootSystem"]["message"] is not None
assert response.json()["data"]["system"]["rebootSystem"]["code"] == 200
assert mock_subprocess_popen.call_count == 1
assert mock_subprocess_popen.call_args[0][0] == ["reboot"]

View File

@ -294,13 +294,15 @@ def test_graphql_get_nonexistent_user(
API_CREATE_USERS_MUTATION = """
mutation createUser($user: UserMutationInput!) {
createUser(user: $user) {
success
message
code
user {
username
sshKeys
users {
createUser(user: $user) {
success
message
code
user {
username
sshKeys
}
}
}
}
@ -339,12 +341,12 @@ def test_graphql_add_user(authorized_client, one_user, mock_subprocess_popen):
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 201
assert response.json()["data"]["createUser"]["success"] is True
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 201
assert response.json()["data"]["users"]["createUser"]["success"] is True
assert response.json()["data"]["createUser"]["user"]["username"] == "user2"
assert response.json()["data"]["createUser"]["user"]["sshKeys"] == []
assert response.json()["data"]["users"]["createUser"]["user"]["username"] == "user2"
assert response.json()["data"]["users"]["createUser"]["user"]["sshKeys"] == []
def test_graphql_add_undefined_settings(
@ -365,12 +367,12 @@ def test_graphql_add_undefined_settings(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 201
assert response.json()["data"]["createUser"]["success"] is True
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 201
assert response.json()["data"]["users"]["createUser"]["success"] is True
assert response.json()["data"]["createUser"]["user"]["username"] == "user2"
assert response.json()["data"]["createUser"]["user"]["sshKeys"] == []
assert response.json()["data"]["users"]["createUser"]["user"]["username"] == "user2"
assert response.json()["data"]["users"]["createUser"]["user"]["sshKeys"] == []
def test_graphql_add_without_password(
@ -391,11 +393,11 @@ def test_graphql_add_without_password(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 400
assert response.json()["data"]["createUser"]["success"] is False
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 400
assert response.json()["data"]["users"]["createUser"]["success"] is False
assert response.json()["data"]["createUser"]["user"] is None
assert response.json()["data"]["users"]["createUser"]["user"] is None
def test_graphql_add_without_both(authorized_client, one_user, mock_subprocess_popen):
@ -414,11 +416,11 @@ def test_graphql_add_without_both(authorized_client, one_user, mock_subprocess_p
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 400
assert response.json()["data"]["createUser"]["success"] is False
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 400
assert response.json()["data"]["users"]["createUser"]["success"] is False
assert response.json()["data"]["createUser"]["user"] is None
assert response.json()["data"]["users"]["createUser"]["user"] is None
@pytest.mark.parametrize("username", invalid_usernames)
@ -440,11 +442,11 @@ def test_graphql_add_system_username(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 409
assert response.json()["data"]["createUser"]["success"] is False
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 409
assert response.json()["data"]["users"]["createUser"]["success"] is False
assert response.json()["data"]["createUser"]["user"] is None
assert response.json()["data"]["users"]["createUser"]["user"] is None
def test_graphql_add_existing_user(authorized_client, one_user, mock_subprocess_popen):
@ -463,13 +465,13 @@ def test_graphql_add_existing_user(authorized_client, one_user, mock_subprocess_
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 409
assert response.json()["data"]["createUser"]["success"] is False
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 409
assert response.json()["data"]["users"]["createUser"]["success"] is False
assert response.json()["data"]["createUser"]["user"]["username"] == "user1"
assert response.json()["data"]["users"]["createUser"]["user"]["username"] == "user1"
assert (
response.json()["data"]["createUser"]["user"]["sshKeys"][0]
response.json()["data"]["users"]["createUser"]["user"]["sshKeys"][0]
== "ssh-rsa KEY user1@pc"
)
@ -490,13 +492,15 @@ def test_graphql_add_main_user(authorized_client, one_user, mock_subprocess_pope
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 409
assert response.json()["data"]["createUser"]["success"] is False
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 409
assert response.json()["data"]["users"]["createUser"]["success"] is False
assert response.json()["data"]["createUser"]["user"]["username"] == "tester"
assert (
response.json()["data"]["createUser"]["user"]["sshKeys"][0]
response.json()["data"]["users"]["createUser"]["user"]["username"] == "tester"
)
assert (
response.json()["data"]["users"]["createUser"]["user"]["sshKeys"][0]
== "ssh-rsa KEY test@pc"
)
@ -516,11 +520,11 @@ def test_graphql_add_long_username(authorized_client, one_user, mock_subprocess_
)
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 400
assert response.json()["data"]["createUser"]["success"] is False
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 400
assert response.json()["data"]["users"]["createUser"]["success"] is False
assert response.json()["data"]["createUser"]["user"] is None
assert response.json()["data"]["users"]["createUser"]["user"] is None
@pytest.mark.parametrize("username", ["", "1", "фыр", "user1@", "^-^"])
@ -542,19 +546,21 @@ def test_graphql_add_invalid_username(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["createUser"]["message"] is not None
assert response.json()["data"]["createUser"]["code"] == 400
assert response.json()["data"]["createUser"]["success"] is False
assert response.json()["data"]["users"]["createUser"]["message"] is not None
assert response.json()["data"]["users"]["createUser"]["code"] == 400
assert response.json()["data"]["users"]["createUser"]["success"] is False
assert response.json()["data"]["createUser"]["user"] is None
assert response.json()["data"]["users"]["createUser"]["user"] is None
API_DELETE_USER_MUTATION = """
mutation deleteUser($username: String!) {
deleteUser(username: $username) {
success
message
code
users {
deleteUser(username: $username) {
success
message
code
}
}
}
"""
@ -582,9 +588,9 @@ def test_graphql_delete_user(authorized_client, some_users, mock_subprocess_pope
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["deleteUser"]["code"] == 200
assert response.json()["data"]["deleteUser"]["message"] is not None
assert response.json()["data"]["deleteUser"]["success"] is True
assert response.json()["data"]["users"]["deleteUser"]["code"] == 200
assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
assert response.json()["data"]["users"]["deleteUser"]["success"] is True
@pytest.mark.parametrize("username", ["", "def"])
@ -601,9 +607,9 @@ def test_graphql_delete_nonexistent_users(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["deleteUser"]["code"] == 404
assert response.json()["data"]["deleteUser"]["message"] is not None
assert response.json()["data"]["deleteUser"]["success"] is False
assert response.json()["data"]["users"]["deleteUser"]["code"] == 404
assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
assert response.json()["data"]["users"]["deleteUser"]["success"] is False
@pytest.mark.parametrize("username", invalid_usernames)
@ -621,11 +627,11 @@ def test_graphql_delete_system_users(
assert response.json().get("data") is not None
assert (
response.json()["data"]["deleteUser"]["code"] == 404
or response.json()["data"]["deleteUser"]["code"] == 400
response.json()["data"]["users"]["deleteUser"]["code"] == 404
or response.json()["data"]["users"]["deleteUser"]["code"] == 400
)
assert response.json()["data"]["deleteUser"]["message"] is not None
assert response.json()["data"]["deleteUser"]["success"] is False
assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
assert response.json()["data"]["users"]["deleteUser"]["success"] is False
def test_graphql_delete_main_user(authorized_client, some_users, mock_subprocess_popen):
@ -639,20 +645,22 @@ def test_graphql_delete_main_user(authorized_client, some_users, mock_subprocess
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["deleteUser"]["code"] == 400
assert response.json()["data"]["deleteUser"]["message"] is not None
assert response.json()["data"]["deleteUser"]["success"] is False
assert response.json()["data"]["users"]["deleteUser"]["code"] == 400
assert response.json()["data"]["users"]["deleteUser"]["message"] is not None
assert response.json()["data"]["users"]["deleteUser"]["success"] is False
API_UPDATE_USER_MUTATION = """
mutation updateUser($user: UserMutationInput!) {
updateUser(user: $user) {
success
message
code
user {
username
sshKeys
users {
updateUser(user: $user) {
success
message
code
user {
username
sshKeys
}
}
}
}
@ -691,12 +699,12 @@ def test_graphql_update_user(authorized_client, some_users, mock_subprocess_pope
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["updateUser"]["code"] == 200
assert response.json()["data"]["updateUser"]["message"] is not None
assert response.json()["data"]["updateUser"]["success"] is True
assert response.json()["data"]["users"]["updateUser"]["code"] == 200
assert response.json()["data"]["users"]["updateUser"]["message"] is not None
assert response.json()["data"]["users"]["updateUser"]["success"] is True
assert response.json()["data"]["updateUser"]["user"]["username"] == "user1"
assert response.json()["data"]["updateUser"]["user"]["sshKeys"] == [
assert response.json()["data"]["users"]["updateUser"]["user"]["username"] == "user1"
assert response.json()["data"]["users"]["updateUser"]["user"]["sshKeys"] == [
"ssh-rsa KEY user1@pc"
]
assert mock_subprocess_popen.call_count == 1
@ -720,9 +728,9 @@ def test_graphql_update_nonexistent_user(
assert response.status_code == 200
assert response.json().get("data") is not None
assert response.json()["data"]["updateUser"]["code"] == 404
assert response.json()["data"]["updateUser"]["message"] is not None
assert response.json()["data"]["updateUser"]["success"] is False
assert response.json()["data"]["users"]["updateUser"]["code"] == 404
assert response.json()["data"]["users"]["updateUser"]["message"] is not None
assert response.json()["data"]["users"]["updateUser"]["success"] is False
assert response.json()["data"]["updateUser"]["user"] is None
assert response.json()["data"]["users"]["updateUser"]["user"] is None
assert mock_subprocess_popen.call_count == 1

View File

@ -1,6 +1,7 @@
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import pytest
from time import sleep
from selfprivacy_api.jobs import Jobs, JobStatus
import selfprivacy_api.jobs as jobsmodule
@ -49,6 +50,20 @@ def test_remove_get_nonexistent(jobs_with_one_job):
assert jobs_with_one_job.get_job(uid_str) is None
def test_set_zeroing_ttl(jobs_with_one_job):
test_job = jobs_with_one_job.get_jobs()[0]
jobs_with_one_job.set_expiration(test_job, 0)
assert jobs_with_one_job.get_jobs() == []
def test_not_zeroing_ttl(jobs_with_one_job):
test_job = jobs_with_one_job.get_jobs()[0]
jobs_with_one_job.set_expiration(test_job, 1)
assert len(jobs_with_one_job.get_jobs()) == 1
sleep(1.2)
assert len(jobs_with_one_job.get_jobs()) == 0
def test_jobs(jobs_with_one_job):
jobs = jobs_with_one_job
test_job = jobs_with_one_job.get_jobs()[0]
@ -80,6 +95,29 @@ def test_jobs(jobs_with_one_job):
jobsmodule.JOB_EXPIRATION_SECONDS = backup
def test_finishing_equals_100(jobs_with_one_job):
jobs = jobs_with_one_job
test_job = jobs.get_jobs()[0]
assert not jobs.is_busy()
assert test_job.progress != 100
jobs.update(job=test_job, status=JobStatus.FINISHED)
assert test_job.progress == 100
def test_finishing_equals_100_unless_stated_otherwise(jobs_with_one_job):
jobs = jobs_with_one_job
test_job = jobs.get_jobs()[0]
assert not jobs.is_busy()
assert test_job.progress != 100
assert test_job.progress != 23
jobs.update(job=test_job, status=JobStatus.FINISHED, progress=23)
assert test_job.progress == 23
@pytest.fixture
def jobs():
j = Jobs()

View File

@ -0,0 +1,33 @@
import pytest
from pydantic import BaseModel
from datetime import datetime
from typing import Optional
from selfprivacy_api.utils.redis_model_storage import store_model_as_hash, hash_as_model
from selfprivacy_api.utils.redis_pool import RedisPool
TEST_KEY = "model_storage"
redis = RedisPool().get_connection()
@pytest.fixture()
def clean_redis():
redis.delete(TEST_KEY)
class DummyModel(BaseModel):
name: str
date: Optional[datetime]
def test_store_retrieve():
model = DummyModel(name="test", date=datetime.now())
store_model_as_hash(redis, TEST_KEY, model)
assert hash_as_model(redis, TEST_KEY, DummyModel) == model
def test_store_retrieve_none():
model = DummyModel(name="test", date=None)
store_model_as_hash(redis, TEST_KEY, model)
assert hash_as_model(redis, TEST_KEY, DummyModel) == model
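
The new test module above exercises two small helpers, store_model_as_hash and hash_as_model, whose implementation is not part of this diff. Purely as an illustration (the real code in selfprivacy_api.utils.redis_model_storage may differ), a minimal version could look roughly like the sketch below, assuming the Redis connection was created with decode_responses=True:

# Illustrative sketch only -- not the repository's actual implementation.
from typing import Optional, Type, TypeVar
from pydantic import BaseModel

M = TypeVar("M", bound=BaseModel)

def store_model_as_hash(redis, key: str, model: BaseModel) -> None:
    # Flatten the model into a Redis hash; None is stored as an empty
    # string because Redis hash fields cannot hold nulls.
    fields = {
        name: "" if value is None else str(value)
        for name, value in model.dict().items()
    }
    redis.hset(key, mapping=fields)

def hash_as_model(redis, key: str, model_class: Type[M]) -> Optional[M]:
    # Read the hash back and let pydantic coerce the string values;
    # empty strings are converted back to None before validation.
    raw = redis.hgetall(key)
    if not raw:
        return None
    data = {name: (None if value == "" else value) for name, value in raw.items()}
    return model_class(**data)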

View File

@ -1,506 +0,0 @@
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import json
import pytest
from selfprivacy_api.restic_controller import ResticStates
def read_json(file_path):
with open(file_path, "r") as f:
return json.load(f)
MOCKED_SNAPSHOTS = [
{
"time": "2021-12-06T09:05:04.224685677+03:00",
"tree": "b76152d1e716d86d420407ead05d9911f2b6d971fe1589c12b63e4de65b14d4e",
"paths": ["/var"],
"hostname": "test-host",
"username": "root",
"id": "f96b428f1ca1252089ea3e25cd8ee33e63fb24615f1cc07559ba907d990d81c5",
"short_id": "f96b428f",
},
{
"time": "2021-12-08T07:42:06.998894055+03:00",
"parent": "f96b428f1ca1252089ea3e25cd8ee33e63fb24615f1cc07559ba907d990d81c5",
"tree": "8379b4fdc9ee3e9bb7c322f632a7bed9fc334b0258abbf4e7134f8fe5b3d61b0",
"paths": ["/var"],
"hostname": "test-host",
"username": "root",
"id": "db96b36efec97e5ba385099b43f9062d214c7312c20138aee7b8bd2c6cd8995a",
"short_id": "db96b36e",
},
]
class ResticControllerMock:
snapshot_list = MOCKED_SNAPSHOTS
state = ResticStates.INITIALIZED
progress = 0
error_message = None
@pytest.fixture
def mock_restic_controller(mocker):
mock = mocker.patch(
"selfprivacy_api.rest.services.ResticController",
autospec=True,
return_value=ResticControllerMock,
)
return mock
class ResticControllerMockNoKey:
snapshot_list = []
state = ResticStates.NO_KEY
progress = 0
error_message = None
@pytest.fixture
def mock_restic_controller_no_key(mocker):
mock = mocker.patch(
"selfprivacy_api.rest.services.ResticController",
autospec=True,
return_value=ResticControllerMockNoKey,
)
return mock
class ResticControllerNotInitialized:
snapshot_list = []
state = ResticStates.NOT_INITIALIZED
progress = 0
error_message = None
@pytest.fixture
def mock_restic_controller_not_initialized(mocker):
mock = mocker.patch(
"selfprivacy_api.rest.services.ResticController",
autospec=True,
return_value=ResticControllerNotInitialized,
)
return mock
class ResticControllerInitializing:
snapshot_list = []
state = ResticStates.INITIALIZING
progress = 0
error_message = None
@pytest.fixture
def mock_restic_controller_initializing(mocker):
mock = mocker.patch(
"selfprivacy_api.rest.services.ResticController",
autospec=True,
return_value=ResticControllerInitializing,
)
return mock
class ResticControllerBackingUp:
snapshot_list = MOCKED_SNAPSHOTS
state = ResticStates.BACKING_UP
progress = 0.42
error_message = None
@pytest.fixture
def mock_restic_controller_backing_up(mocker):
mock = mocker.patch(
"selfprivacy_api.rest.services.ResticController",
autospec=True,
return_value=ResticControllerBackingUp,
)
return mock
class ResticControllerError:
snapshot_list = MOCKED_SNAPSHOTS
state = ResticStates.ERROR
progress = 0
error_message = "Error message"
@pytest.fixture
def mock_restic_controller_error(mocker):
mock = mocker.patch(
"selfprivacy_api.rest.services.ResticController",
autospec=True,
return_value=ResticControllerError,
)
return mock
class ResticControllerRestoring:
snapshot_list = MOCKED_SNAPSHOTS
state = ResticStates.RESTORING
progress = 0
error_message = None
@pytest.fixture
def mock_restic_controller_restoring(mocker):
mock = mocker.patch(
"selfprivacy_api.rest.services.ResticController",
autospec=True,
return_value=ResticControllerRestoring,
)
return mock
@pytest.fixture
def mock_restic_tasks(mocker):
mock = mocker.patch("selfprivacy_api.rest.services.restic_tasks", autospec=True)
return mock
@pytest.fixture
def undefined_settings(mocker, datadir):
mocker.patch("selfprivacy_api.utils.USERDATA_FILE", new=datadir / "undefined.json")
assert "backup" not in read_json(datadir / "undefined.json")
return datadir
@pytest.fixture
def some_settings(mocker, datadir):
mocker.patch(
"selfprivacy_api.utils.USERDATA_FILE", new=datadir / "some_values.json"
)
assert "backup" in read_json(datadir / "some_values.json")
assert read_json(datadir / "some_values.json")["backup"]["provider"] == "BACKBLAZE"
assert read_json(datadir / "some_values.json")["backup"]["accountId"] == "ID"
assert read_json(datadir / "some_values.json")["backup"]["accountKey"] == "KEY"
assert read_json(datadir / "some_values.json")["backup"]["bucket"] == "BUCKET"
return datadir
@pytest.fixture
def no_values(mocker, datadir):
mocker.patch("selfprivacy_api.utils.USERDATA_FILE", new=datadir / "no_values.json")
assert "backup" in read_json(datadir / "no_values.json")
assert "provider" not in read_json(datadir / "no_values.json")["backup"]
assert "accountId" not in read_json(datadir / "no_values.json")["backup"]
assert "accountKey" not in read_json(datadir / "no_values.json")["backup"]
assert "bucket" not in read_json(datadir / "no_values.json")["backup"]
return datadir
def test_get_snapshots_unauthorized(client, mock_restic_controller, mock_restic_tasks):
response = client.get("/services/restic/backup/list")
assert response.status_code == 401
def test_get_snapshots(authorized_client, mock_restic_controller, mock_restic_tasks):
response = authorized_client.get("/services/restic/backup/list")
assert response.status_code == 200
assert response.json() == MOCKED_SNAPSHOTS
def test_create_backup_unauthorized(client, mock_restic_controller, mock_restic_tasks):
response = client.put("/services/restic/backup/create")
assert response.status_code == 401
def test_create_backup(authorized_client, mock_restic_controller, mock_restic_tasks):
response = authorized_client.put("/services/restic/backup/create")
assert response.status_code == 200
assert mock_restic_tasks.start_backup.call_count == 1
def test_create_backup_without_key(
authorized_client, mock_restic_controller_no_key, mock_restic_tasks
):
response = authorized_client.put("/services/restic/backup/create")
assert response.status_code == 400
assert mock_restic_tasks.start_backup.call_count == 0
def test_create_backup_initializing(
authorized_client, mock_restic_controller_initializing, mock_restic_tasks
):
response = authorized_client.put("/services/restic/backup/create")
assert response.status_code == 400
assert mock_restic_tasks.start_backup.call_count == 0
def test_create_backup_backing_up(
authorized_client, mock_restic_controller_backing_up, mock_restic_tasks
):
response = authorized_client.put("/services/restic/backup/create")
assert response.status_code == 409
assert mock_restic_tasks.start_backup.call_count == 0
def test_check_backup_status_unauthorized(
client, mock_restic_controller, mock_restic_tasks
):
response = client.get("/services/restic/backup/status")
assert response.status_code == 401
def test_check_backup_status(
authorized_client, mock_restic_controller, mock_restic_tasks
):
response = authorized_client.get("/services/restic/backup/status")
assert response.status_code == 200
assert response.json() == {
"status": "INITIALIZED",
"progress": 0,
"error_message": None,
}
def test_check_backup_status_no_key(
authorized_client, mock_restic_controller_no_key, mock_restic_tasks
):
response = authorized_client.get("/services/restic/backup/status")
assert response.status_code == 200
assert response.json() == {
"status": "NO_KEY",
"progress": 0,
"error_message": None,
}
def test_check_backup_status_not_initialized(
authorized_client, mock_restic_controller_not_initialized, mock_restic_tasks
):
response = authorized_client.get("/services/restic/backup/status")
assert response.status_code == 200
assert response.json() == {
"status": "NOT_INITIALIZED",
"progress": 0,
"error_message": None,
}
def test_check_backup_status_initializing(
authorized_client, mock_restic_controller_initializing, mock_restic_tasks
):
response = authorized_client.get("/services/restic/backup/status")
assert response.status_code == 200
assert response.json() == {
"status": "INITIALIZING",
"progress": 0,
"error_message": None,
}
def test_check_backup_status_backing_up(
authorized_client, mock_restic_controller_backing_up
):
response = authorized_client.get("/services/restic/backup/status")
assert response.status_code == 200
assert response.json() == {
"status": "BACKING_UP",
"progress": 0.42,
"error_message": None,
}
def test_check_backup_status_error(
authorized_client, mock_restic_controller_error, mock_restic_tasks
):
response = authorized_client.get("/services/restic/backup/status")
assert response.status_code == 200
assert response.json() == {
"status": "ERROR",
"progress": 0,
"error_message": "Error message",
}
def test_check_backup_status_restoring(
authorized_client, mock_restic_controller_restoring, mock_restic_tasks
):
response = authorized_client.get("/services/restic/backup/status")
assert response.status_code == 200
assert response.json() == {
"status": "RESTORING",
"progress": 0,
"error_message": None,
}
def test_reload_unauthenticated(client, mock_restic_controller, mock_restic_tasks):
response = client.get("/services/restic/backup/reload")
assert response.status_code == 401
def test_backup_reload(authorized_client, mock_restic_controller, mock_restic_tasks):
response = authorized_client.get("/services/restic/backup/reload")
assert response.status_code == 200
assert mock_restic_tasks.load_snapshots.call_count == 1
def test_backup_restore_unauthorized(client, mock_restic_controller, mock_restic_tasks):
response = client.put("/services/restic/backup/restore")
assert response.status_code == 401
def test_backup_restore_without_backup_id(
authorized_client, mock_restic_controller, mock_restic_tasks
):
response = authorized_client.put("/services/restic/backup/restore", json={})
assert response.status_code == 422
assert mock_restic_tasks.restore_from_backup.call_count == 0
def test_backup_restore_with_nonexistent_backup_id(
authorized_client, mock_restic_controller, mock_restic_tasks
):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "nonexistent"}
)
assert response.status_code == 404
assert mock_restic_tasks.restore_from_backup.call_count == 0
def test_backup_restore_when_no_key(
authorized_client, mock_restic_controller_no_key, mock_restic_tasks
):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "f96b428f"}
)
assert response.status_code == 400
assert mock_restic_tasks.restore_from_backup.call_count == 0
def test_backup_restore_when_not_initialized(
authorized_client, mock_restic_controller_not_initialized, mock_restic_tasks
):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "f96b428f"}
)
assert response.status_code == 400
assert mock_restic_tasks.restore_from_backup.call_count == 0
def test_backup_restore_when_initializing(
authorized_client, mock_restic_controller_initializing, mock_restic_tasks
):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "f96b428f"}
)
assert response.status_code == 400
assert mock_restic_tasks.restore_from_backup.call_count == 0
def test_backup_restore_when_backing_up(
authorized_client, mock_restic_controller_backing_up, mock_restic_tasks
):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "f96b428f"}
)
assert response.status_code == 409
assert mock_restic_tasks.restore_from_backup.call_count == 0
def test_backup_restore_when_restoring(
authorized_client, mock_restic_controller_restoring, mock_restic_tasks
):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "f96b428f"}
)
assert response.status_code == 409
assert mock_restic_tasks.restore_from_backup.call_count == 0
def test_backup_restore_when_error(
authorized_client, mock_restic_controller_error, mock_restic_tasks
):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "f96b428f"}
)
assert response.status_code == 200
assert mock_restic_tasks.restore_from_backup.call_count == 1
def test_backup_restore(authorized_client, mock_restic_controller, mock_restic_tasks):
response = authorized_client.put(
"/services/restic/backup/restore", json={"backupId": "f96b428f"}
)
assert response.status_code == 200
assert mock_restic_tasks.restore_from_backup.call_count == 1
def test_set_backblaze_config_unauthorized(
client, mock_restic_controller, mock_restic_tasks, some_settings
):
response = client.put("/services/restic/backblaze/config")
assert response.status_code == 401
assert mock_restic_tasks.update_keys_from_userdata.call_count == 0
def test_set_backblaze_config_without_arguments(
authorized_client, mock_restic_controller, mock_restic_tasks, some_settings
):
response = authorized_client.put("/services/restic/backblaze/config")
assert response.status_code == 422
assert mock_restic_tasks.update_keys_from_userdata.call_count == 0
def test_set_backblaze_config_without_all_values(
authorized_client, mock_restic_controller, mock_restic_tasks, some_settings
):
response = authorized_client.put(
"/services/restic/backblaze/config",
json={"accountId": "123", "applicationKey": "456"},
)
assert response.status_code == 422
assert mock_restic_tasks.update_keys_from_userdata.call_count == 0
def test_set_backblaze_config(
authorized_client, mock_restic_controller, mock_restic_tasks, some_settings
):
response = authorized_client.put(
"/services/restic/backblaze/config",
json={"accountId": "123", "accountKey": "456", "bucket": "789"},
)
assert response.status_code == 200
assert mock_restic_tasks.update_keys_from_userdata.call_count == 1
assert read_json(some_settings / "some_values.json")["backup"] == {
"provider": "BACKBLAZE",
"accountId": "123",
"accountKey": "456",
"bucket": "789",
}
def test_set_backblaze_config_on_undefined(
authorized_client, mock_restic_controller, mock_restic_tasks, undefined_settings
):
response = authorized_client.put(
"/services/restic/backblaze/config",
json={"accountId": "123", "accountKey": "456", "bucket": "789"},
)
assert response.status_code == 200
assert mock_restic_tasks.update_keys_from_userdata.call_count == 1
assert read_json(undefined_settings / "undefined.json")["backup"] == {
"provider": "BACKBLAZE",
"accountId": "123",
"accountKey": "456",
"bucket": "789",
}
def test_set_backblaze_config_on_no_values(
authorized_client, mock_restic_controller, mock_restic_tasks, no_values
):
response = authorized_client.put(
"/services/restic/backblaze/config",
json={"accountId": "123", "accountKey": "456", "bucket": "789"},
)
assert response.status_code == 200
assert mock_restic_tasks.update_keys_from_userdata.call_count == 1
assert read_json(no_values / "no_values.json")["backup"] == {
"provider": "BACKBLAZE",
"accountId": "123",
"accountKey": "456",
"bucket": "789",
}

89 tests/test_services.py Normal file
View File

@ -0,0 +1,89 @@
"""
Tests for generic service methods
"""
from pytest import raises
from selfprivacy_api.services.bitwarden import Bitwarden
from selfprivacy_api.services.pleroma import Pleroma
from selfprivacy_api.services.owned_path import OwnedPath
from selfprivacy_api.services.generic_service_mover import FolderMoveNames
from selfprivacy_api.services.test_service import DummyService
from selfprivacy_api.services.service import Service, ServiceStatus, StoppedService
from selfprivacy_api.utils.waitloop import wait_until_true
from tests.test_graphql.test_backup import raw_dummy_service
def test_unimplemented_folders_raises():
with raises(NotImplementedError):
Service.get_folders()
with raises(NotImplementedError):
Service.get_owned_folders()
class OurDummy(DummyService, folders=["testydir", "dirtessimo"]):
pass
owned_folders = OurDummy.get_owned_folders()
assert owned_folders is not None
def test_service_stopper(raw_dummy_service):
dummy: Service = raw_dummy_service
dummy.set_delay(0.3)
assert dummy.get_status() == ServiceStatus.ACTIVE
with StoppedService(dummy) as stopped_dummy:
assert stopped_dummy.get_status() == ServiceStatus.INACTIVE
assert dummy.get_status() == ServiceStatus.INACTIVE
assert dummy.get_status() == ServiceStatus.ACTIVE
def test_delayed_start_stop(raw_dummy_service):
dummy = raw_dummy_service
dummy.set_delay(0.3)
dummy.stop()
assert dummy.get_status() == ServiceStatus.DEACTIVATING
wait_until_true(lambda: dummy.get_status() == ServiceStatus.INACTIVE)
assert dummy.get_status() == ServiceStatus.INACTIVE
dummy.start()
assert dummy.get_status() == ServiceStatus.ACTIVATING
wait_until_true(lambda: dummy.get_status() == ServiceStatus.ACTIVE)
assert dummy.get_status() == ServiceStatus.ACTIVE
def test_owned_folders_from_not_owned():
assert Bitwarden.get_owned_folders() == [
OwnedPath(
path=folder,
group="vaultwarden",
owner="vaultwarden",
)
for folder in Bitwarden.get_folders()
]
def test_paths_from_owned_paths():
assert len(Pleroma.get_folders()) == 2
assert Pleroma.get_folders() == [
ownedpath.path for ownedpath in Pleroma.get_owned_folders()
]
def test_foldermoves_from_ownedpaths():
owned = OwnedPath(
path="var/lib/bitwarden",
group="vaultwarden",
owner="vaultwarden",
)
assert FolderMoveNames.from_owned_path(owned) == FolderMoveNames(
name="bitwarden",
bind_location="var/lib/bitwarden",
group="vaultwarden",
owner="vaultwarden",
)
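
For context on the StoppedService context manager used in test_service_stopper above: its implementation lives in selfprivacy_api.services.service and is not shown in this diff. A rough sketch of the pattern it tests (an assumption, not the actual code) could be:

# Rough sketch only -- the real StoppedService may differ in detail.
from selfprivacy_api.services.service import Service, ServiceStatus
from selfprivacy_api.utils.waitloop import wait_until_true

class StoppedService:
    """Stop a service on entry and restart it on exit."""

    def __init__(self, service: Service):
        self.service = service

    def __enter__(self) -> Service:
        self.service.stop()
        # Stopping can be asynchronous (DEACTIVATING), so wait for INACTIVE.
        wait_until_true(lambda: self.service.get_status() == ServiceStatus.INACTIVE)
        return self.service

    def __exit__(self, exc_type, exc_value, traceback) -> bool:
        self.service.start()
        wait_until_true(lambda: self.service.get_status() == ServiceStatus.ACTIVE)
        return False  # do not suppress exceptions from the with-body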