Compare commits

...

14 Commits

9 changed files with 272 additions and 50 deletions

View File

@ -29,6 +29,9 @@ class Backups:
Storage.store_testrepo_path(file_path)
Storage.store_provider(provider)
def set_provider(provider: AbstractBackupProvider):
Storage.store_provider(provider)
@staticmethod
def get_last_backed_up(service: Service) -> Optional[datetime]:
"""Get a timezone-aware time of the last backup of a service"""
@ -126,19 +129,21 @@ class Backups:
return Backups.lookup_provider()
@staticmethod
def set_provider(kind: str, login: str, key: str):
provider = Backups.construct_provider(kind, login, key)
def set_provider(kind: str, login: str, key: str, location: str, repo_id: str = ""):
provider = Backups.construct_provider(kind, login, key, location, repo_id)
Storage.store_provider(provider)
@staticmethod
def construct_provider(kind: str, login: str, key: str):
def construct_provider(
kind: str, login: str, key: str, location: str, repo_id: str = ""
):
provider_class = get_provider(BackupProvider[kind])
if kind == "FILE":
path = Storage.get_testrepo_path()
return provider_class(path)
return provider_class(login=login, key=key)
return provider_class(login=login, key=key, location=location, repo_id=repo_id)
@staticmethod
def reset():
@ -169,17 +174,19 @@ class Backups:
if "backblaze" in user_data.keys():
account = user_data["backblaze"]["accountId"]
key = user_data["backblaze"]["accountKey"]
location = user_data["backblaze"]["bucket"]
provider_string = "BACKBLAZE"
return Backups.construct_provider(
kind=provider_string, login=account, key=key
kind=provider_string, login=account, key=key, location=location
)
return None
account = user_data["backup"]["accountId"]
key = user_data["backup"]["accountKey"]
provider_string = user_data["backup"]["provider"]
location = user_data["backup"]["bucket"]
return Backups.construct_provider(
kind=provider_string, login=account, key=key
kind=provider_string, login=account, key=key, location=location
)
@staticmethod
@ -188,7 +195,11 @@ class Backups:
if provider_model is None:
return None
return Backups.construct_provider(
provider_model.kind, provider_model.login, provider_model.key
provider_model.kind,
provider_model.login,
provider_model.key,
provider_model.location,
provider_model.repo_id,
)
@staticmethod
@ -212,39 +223,68 @@ class Backups:
raise e
Jobs.update(job, status=JobStatus.FINISHED)
return snapshot
@staticmethod
def init_repo(service: Service):
repo_name = service.get_id()
Backups.provider().backuper.init(repo_name)
Storage.mark_as_init(service)
def init_repo(service: Optional[Service] = None):
if service is not None:
repo_name = service.get_id()
Backups.provider().backuper.init()
Storage.mark_as_init()
@staticmethod
def is_initted(service: Service) -> bool:
repo_name = service.get_id()
if Storage.has_init_mark(service):
def is_initted() -> bool:
if Storage.has_init_mark():
return True
initted = Backups.provider().backuper.is_initted(repo_name)
initted = Backups.provider().backuper.is_initted()
if initted:
Storage.mark_as_init(service)
Storage.mark_as_init()
return True
return False
@staticmethod
def get_snapshots(service: Service) -> List[Snapshot]:
service_id = service.get_id()
cached_snapshots = Backups.get_cached_snapshots_service(service_id)
snapshots = Backups.get_all_snapshots()
return [snap for snap in snapshots if snap.service_name == service.get_id()]
@staticmethod
def get_all_snapshots() -> List[Snapshot]:
cached_snapshots = Storage.get_cached_snapshots()
if cached_snapshots != []:
return cached_snapshots
# TODO: the oldest snapshots will get expired faster than the new ones.
# How to detect that the end is missing?
upstream_snapshots = Backups.provider().backuper.get_snapshots(service_id)
Backups.sync_service_snapshots(service_id, upstream_snapshots)
upstream_snapshots = Backups.provider().backuper.get_snapshots()
Backups.sync_all_snapshots()
return upstream_snapshots
@staticmethod
def get_snapshot_by_id(id: str) -> Optional[Snapshot]:
snap = Storage.get_cached_snapshot_by_id(id)
if snap is not None:
return snap
# Possibly our cache entry got invalidated, let's try one more time
Backups.sync_all_snapshots()
snap = Storage.get_cached_snapshot_by_id(id)
return snap
@staticmethod
def force_snapshot_reload():
Backups.sync_all_snapshots()
@staticmethod
def sync_all_snapshots():
upstream_snapshots = Backups.provider().backuper.get_snapshots()
Storage.invalidate_snapshot_storage()
for snapshot in upstream_snapshots:
Storage.cache_snapshot(snapshot)
@staticmethod
def restore_service_from_snapshot(service: Service, snapshot_id: str):
repo_name = service.get_id()

View File

@ -12,6 +12,11 @@ class AbstractBackupProvider(ABC):
def backuper(self) -> AbstractBackuper:
raise NotImplementedError
def __init__(self, login="", key=""):
def __init__(self, login="", key="", location="", repo_id=""):
self.backuper.set_creds(login, key, location)
self.login = login
self.key = key
self.location = location
# We do not need to do anything with this one
# Just remember in case the app forgets
self.repo_id = repo_id

View File

@ -145,7 +145,7 @@ class ResticBackuper(AbstractBackuper):
service_name=repo_name,
)
def init(self, repo_name):
def init(self):
init_command = self.restic_command(
"init",
)
@ -159,7 +159,7 @@ class ResticBackuper(AbstractBackuper):
if not "created restic repository" in output:
raise ValueError("cannot init a repo: " + output)
def is_initted(self, repo_name: str) -> bool:
def is_initted(self) -> bool:
command = self.restic_command(
"check",
"--json",
@ -212,7 +212,7 @@ class ResticBackuper(AbstractBackuper):
if "restoring" not in output:
raise ValueError("cannot restore a snapshot: " + output)
def _load_snapshots(self, repo_name) -> object:
def _load_snapshots(self) -> object:
"""
Load list of snapshots from repository
raises Value Error if repo does not exist
@ -237,10 +237,10 @@ class ResticBackuper(AbstractBackuper):
except ValueError as e:
raise ValueError("Cannot load snapshots: ") from e
def get_snapshots(self, repo_name) -> List[Snapshot]:
def get_snapshots(self) -> List[Snapshot]:
"""Get all snapshots from the repo"""
snapshots = []
for restic_snapshot in self._load_snapshots(repo_name):
for restic_snapshot in self._load_snapshots():
snapshot = Snapshot(
id=restic_snapshot["short_id"],
created_at=restic_snapshot["time"],

View File

@ -47,6 +47,11 @@ class Storage:
for key in redis.keys(prefix + "*"):
redis.delete(key)
@staticmethod
def invalidate_snapshot_storage():
for key in redis.keys(REDIS_SNAPSHOTS_PREFIX + "*"):
redis.delete(key)
@staticmethod
def store_testrepo_path(path: str):
redis.set(REDIS_REPO_PATH_KEY, path)
@ -97,6 +102,13 @@ class Storage:
snapshot_key = Storage.__snapshot_key(snapshot)
redis.delete(snapshot_key)
@staticmethod
def get_cached_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
key = REDIS_SNAPSHOTS_PREFIX + snapshot_id
if not redis.exists(key):
return None
return hash_as_model(redis, key, Snapshot)
@staticmethod
def get_cached_snapshots() -> List[Snapshot]:
keys = redis.keys(REDIS_SNAPSHOTS_PREFIX + "*")
@ -146,7 +158,11 @@ class Storage:
redis,
REDIS_PROVIDER_KEY,
BackupProviderModel(
kind=get_kind(provider), login=provider.login, key=provider.key
kind=get_kind(provider),
login=provider.login,
key=provider.key,
location=provider.location,
repo_id=provider.repo_id,
),
)
@ -156,13 +172,11 @@ class Storage:
return provider_model
@staticmethod
def has_init_mark(service: Service) -> bool:
repo_name = service.get_id()
if redis.exists(REDIS_INITTED_CACHE_PREFIX + repo_name):
def has_init_mark() -> bool:
if redis.exists(REDIS_INITTED_CACHE_PREFIX):
return True
return False
@staticmethod
def mark_as_init(service: Service):
repo_name = service.get_id()
redis.set(REDIS_INITTED_CACHE_PREFIX + repo_name, 1)
def mark_as_init():
redis.set(REDIS_INITTED_CACHE_PREFIX, 1)

View File

@ -108,6 +108,13 @@ class Service:
return None
@strawberry.type
class SnapshotInfo:
id: str
service: "Service"
created_at: datetime.datetime
def service_to_graphql_service(service: ServiceInterface) -> Service:
"""Convert service to graphql service"""
return Service(

View File

@ -0,0 +1,106 @@
import datetime
import typing
import strawberry
from strawberry.types import Info
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.mutation_interface import (
GenericMutationReturn,
MutationReturnInterface,
)
from selfprivacy_api.graphql.queries.backup import BackupConfiguration
from selfprivacy_api.graphql.queries.backup import Backup
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.backup import Backups
from selfprivacy_api.services import get_all_services, get_service_by_id
from selfprivacy_api.backup.tasks import start_backup, restore_snapshot
@strawberry.input
class InitializeRepositoryInput:
"""Initialize repository input"""
provider: BackupProvider
# The following field may become optional for other providers?
# Backblaze takes bucket id and name
location_id: str
location_name: str
# Key ID and key for Backblaze
login: str
password: str
@strawberry.type
class GenericBackupConfigReturn(MutationReturnInterface):
"""Generic backup config return"""
configuration: typing.Optional[BackupConfiguration]
class GenericJobMutationReturn:
pass
@strawberry.type
class BackupMutations:
@strawberry.mutation(permission_classes=[IsAuthenticated])
def initialize_repository(
self, repository: InitializeRepositoryInput
) -> GenericBackupConfigReturn:
"""Initialize a new repository"""
provider = Backups.construct_provider(
kind=repository.provider,
login=repository.login,
key=repository.password,
location=repository.location_name,
repo_id=repository.location_id,
)
Backups.set_provider(provider)
Backups.init_repo()
return Backup.configuration()
@strawberry.mutation(permission_classes=[IsAuthenticated])
def remove_repository(self) -> GenericBackupConfigReturn:
"""Remove repository"""
Backups.reset()
return Backup.configuration()
@strawberry.mutation(permission_classes=[IsAuthenticated])
def set_autobackup_period(
self, period: typing.Optional[int] = None
) -> GenericBackupConfigReturn:
"""Set autobackup period. None is to disable autobackup"""
Backups.set_autobackup_period_minutes(period)
return Backup.configuration()
@strawberry.mutation(permission_classes=[IsAuthenticated])
def start_backup(
self, service_id: typing.Optional[str] = None
) -> GenericJobMutationReturn:
"""Start backup. If service not provided, backup all services"""
if service_id is None:
for service in get_all_services():
start_backup(service)
else:
service = get_service_by_id(service_id)
if service is None:
raise ValueError(f"nonexistent service: {service_id}")
start_backup(service)
return GenericJobMutationReturn()
@strawberry.mutation(permission_classes=[IsAuthenticated])
def restore_backup(self, snapshot_id: str) -> GenericJobMutationReturn:
"""Restore backup"""
snap = Backups.get_snapshot_by_id(snapshot_id)
if snap is None:
raise ValueError(f"No such snapshot: {snapshot_id}")
restore_snapshot(snap)
return GenericJobMutationReturn()
@strawberry.mutation(permission_classes=[IsAuthenticated])
def force_snapshots_reload(self) -> GenericMutationReturn:
"""Force snapshots reload"""
Backups.force_snapshot_reload()
return GenericMutationReturn()

View File

@ -2,13 +2,46 @@
# pylint: disable=too-few-public-methods
import typing
import strawberry
from selfprivacy_api.graphql.common_types.backup_snapshot import SnapshotInfo
from selfprivacy_api.backup import Backups
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.service import SnapshotInfo
@strawberry.type
class BackupConfiguration:
provider: BackupProvider
# When server is lost, the app should have the key to decrypt backups on a new server
encryption_key: str
# If none, autobackups are disabled
autobackup_period: typing.Optional[int] = None
# Bucket name for Backblaze, path for some other providers
location_name: typing.Optional[str] = None
location_id: typing.Optional[str] = None
# False when repo is not initialized and not ready to be used
is_initialized: bool
@strawberry.type
class Backup:
backend: str
@strawberry.field
def configuration() -> BackupConfiguration:
config = BackupConfiguration()
config.encryption_key = LocalBackupSecret.get()
config.is_initialized = Backups.is_initted()
config.autobackup_period = Backups.autobackup_period_minutes()
config.location_name = Backups.provider().location
config.location_id = Backups.provider().repo_id
return config
@strawberry.field
def get_backups(self) -> typing.List[SnapshotInfo]:
return []
def all_snapshots(self) -> typing.List[SnapshotInfo]:
result = []
snapshots = Backups.get_all_snapshots()
for snap in snapshots:
graphql_snap = SnapshotInfo(
id=snap.id, service=snap.service_name, created_at=snap.created_at
)
result.append(graphql_snap)
return result

View File

@ -7,3 +7,5 @@ class BackupProviderModel(BaseModel):
kind: str
login: str
key: str
location: str
repo_id: str # for app usage, not for us

View File

@ -109,6 +109,9 @@ def test_config_load(generic_userdata):
assert provider.login == "ID"
assert provider.key == "KEY"
assert provider.backuper.account == "ID"
assert provider.backuper.key == "KEY"
def test_select_backend():
provider = providers.get_provider(BackupProvider.BACKBLAZE)
@ -117,7 +120,7 @@ def test_select_backend():
def test_file_backend_init(file_backup):
file_backup.backuper.init("somerepo")
file_backup.backuper.init()
def test_backup_simple_file(raw_dummy_service, file_backup):
@ -127,7 +130,7 @@ def test_backup_simple_file(raw_dummy_service, file_backup):
assert file_backup is not None
name = service.get_id()
file_backup.backuper.init(name)
file_backup.backuper.init()
def test_backup_service(dummy_service, backups):
@ -148,7 +151,7 @@ def test_backup_service(dummy_service, backups):
def test_no_repo(memory_backup):
with pytest.raises(ValueError):
assert memory_backup.backuper.get_snapshots("") == []
assert memory_backup.backuper.get_snapshots() == []
def test_one_snapshot(backups, dummy_service):
@ -214,11 +217,11 @@ def test_sizing(backups, dummy_service):
def test_init_tracking(backups, raw_dummy_service):
assert Backups.is_initted(raw_dummy_service) is False
assert Backups.is_initted() is False
Backups.init_repo(raw_dummy_service)
Backups.init_repo()
assert Backups.is_initted(raw_dummy_service) is True
assert Backups.is_initted() is True
def finished_jobs():
@ -240,6 +243,18 @@ def assert_job_had_progress(job_type):
assert len(Jobs.progress_updates(job)) > 0
def test_snapshots_by_id(backups, dummy_service):
snap1 = Backups.back_up(dummy_service)
snap2 = Backups.back_up(dummy_service)
snap3 = Backups.back_up(dummy_service)
assert snap2.id is not None
assert snap2.id != ""
assert len(Backups.get_snapshots(dummy_service)) == 3
assert Backups.get_snapshot_by_id(snap2.id).id == snap2.id
def test_backup_service_task(backups, dummy_service):
handle = start_backup(dummy_service)
handle(blocking=True)
@ -411,21 +426,21 @@ def test_snapshots_caching(backups, dummy_service):
# Storage
def test_init_tracking_caching(backups, raw_dummy_service):
assert Storage.has_init_mark(raw_dummy_service) is False
assert Storage.has_init_mark() is False
Storage.mark_as_init(raw_dummy_service)
Storage.mark_as_init()
assert Storage.has_init_mark(raw_dummy_service) is True
assert Backups.is_initted(raw_dummy_service) is True
assert Storage.has_init_mark() is True
assert Backups.is_initted() is True
# Storage
def test_init_tracking_caching2(backups, raw_dummy_service):
assert Storage.has_init_mark(raw_dummy_service) is False
assert Storage.has_init_mark() is False
Backups.init_repo(raw_dummy_service)
Backups.init_repo()
assert Storage.has_init_mark(raw_dummy_service) is True
assert Storage.has_init_mark() is True
# Storage