Compare commits
No commits in common. "master" and "v.1.0.1" have entirely different histories.
|
@ -1,2 +0,0 @@
|
||||||
[run]
|
|
||||||
source = selfprivacy_api
|
|
28
.drone.yml
28
.drone.yml
|
@ -1,28 +0,0 @@
|
||||||
kind: pipeline
|
|
||||||
type: exec
|
|
||||||
name: default
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Run Tests and Generate Coverage Report
|
|
||||||
commands:
|
|
||||||
- nix flake check -L
|
|
||||||
- sonar-scanner -Dsonar.projectKey=SelfPrivacy-REST-API -Dsonar.sources=. -Dsonar.host.url=http://analyzer.lan:9000 -Dsonar.login="$SONARQUBE_TOKEN"
|
|
||||||
environment:
|
|
||||||
SONARQUBE_TOKEN:
|
|
||||||
from_secret: SONARQUBE_TOKEN
|
|
||||||
|
|
||||||
|
|
||||||
- name: Run Bandit Checks
|
|
||||||
commands:
|
|
||||||
- bandit -ll -r selfprivacy_api
|
|
||||||
|
|
||||||
- name: Run Code Formatting Checks
|
|
||||||
commands:
|
|
||||||
- black --check .
|
|
||||||
|
|
||||||
node:
|
|
||||||
server: builder
|
|
||||||
|
|
||||||
trigger:
|
|
||||||
event:
|
|
||||||
- push
|
|
4
.flake8
4
.flake8
|
@ -1,4 +0,0 @@
|
||||||
[flake8]
|
|
||||||
max-line-length = 80
|
|
||||||
select = C,E,F,W,B,B950
|
|
||||||
extend-ignore = E203, E501
|
|
|
@ -1,153 +0,0 @@
|
||||||
users.nix
|
|
||||||
|
|
||||||
### Flask ###
|
|
||||||
instance/*
|
|
||||||
!instance/.gitignore
|
|
||||||
.webassets-cache
|
|
||||||
.env
|
|
||||||
|
|
||||||
### Flask.Python Stack ###
|
|
||||||
# Byte-compiled / optimized / DLL files
|
|
||||||
__pycache__/
|
|
||||||
*.py[cod]
|
|
||||||
*$py.class
|
|
||||||
|
|
||||||
# C extensions
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Distribution / packaging
|
|
||||||
.Python
|
|
||||||
build/
|
|
||||||
develop-eggs/
|
|
||||||
dist/
|
|
||||||
downloads/
|
|
||||||
eggs/
|
|
||||||
.eggs/
|
|
||||||
lib/
|
|
||||||
lib64/
|
|
||||||
parts/
|
|
||||||
sdist/
|
|
||||||
var/
|
|
||||||
wheels/
|
|
||||||
share/python-wheels/
|
|
||||||
*.egg-info/
|
|
||||||
.installed.cfg
|
|
||||||
*.egg
|
|
||||||
MANIFEST
|
|
||||||
|
|
||||||
# PyInstaller
|
|
||||||
# Usually these files are written by a python script from a template
|
|
||||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
||||||
*.manifest
|
|
||||||
*.spec
|
|
||||||
|
|
||||||
# Installer logs
|
|
||||||
pip-log.txt
|
|
||||||
pip-delete-this-directory.txt
|
|
||||||
|
|
||||||
# Unit test / coverage reports
|
|
||||||
htmlcov/
|
|
||||||
.tox/
|
|
||||||
.nox/
|
|
||||||
.coverage
|
|
||||||
.coverage.*
|
|
||||||
.cache
|
|
||||||
nosetests.xml
|
|
||||||
coverage.xml
|
|
||||||
*.cover
|
|
||||||
*.py,cover
|
|
||||||
.hypothesis/
|
|
||||||
.pytest_cache/
|
|
||||||
cover/
|
|
||||||
|
|
||||||
# Translations
|
|
||||||
*.mo
|
|
||||||
*.pot
|
|
||||||
|
|
||||||
# Django stuff:
|
|
||||||
*.log
|
|
||||||
local_settings.py
|
|
||||||
db.sqlite3
|
|
||||||
db.sqlite3-journal
|
|
||||||
|
|
||||||
# Flask stuff:
|
|
||||||
instance/
|
|
||||||
|
|
||||||
# Scrapy stuff:
|
|
||||||
.scrapy
|
|
||||||
|
|
||||||
# Sphinx documentation
|
|
||||||
docs/_build/
|
|
||||||
|
|
||||||
# PyBuilder
|
|
||||||
.pybuilder/
|
|
||||||
target/
|
|
||||||
|
|
||||||
# Jupyter Notebook
|
|
||||||
.ipynb_checkpoints
|
|
||||||
|
|
||||||
# IPython
|
|
||||||
profile_default/
|
|
||||||
ipython_config.py
|
|
||||||
|
|
||||||
# pyenv
|
|
||||||
# For a library or package, you might want to ignore these files since the code is
|
|
||||||
# intended to run in multiple environments; otherwise, check them in:
|
|
||||||
# .python-version
|
|
||||||
|
|
||||||
# pipenv
|
|
||||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
||||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
||||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
||||||
# install all needed dependencies.
|
|
||||||
#Pipfile.lock
|
|
||||||
|
|
||||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
|
||||||
__pypackages__/
|
|
||||||
|
|
||||||
# Celery stuff
|
|
||||||
celerybeat-schedule
|
|
||||||
celerybeat.pid
|
|
||||||
|
|
||||||
# SageMath parsed files
|
|
||||||
*.sage.py
|
|
||||||
|
|
||||||
# Environments
|
|
||||||
.venv
|
|
||||||
env/
|
|
||||||
venv/
|
|
||||||
ENV/
|
|
||||||
env.bak/
|
|
||||||
venv.bak/
|
|
||||||
|
|
||||||
# Spyder project settings
|
|
||||||
.spyderproject
|
|
||||||
.spyproject
|
|
||||||
|
|
||||||
# Rope project settings
|
|
||||||
.ropeproject
|
|
||||||
|
|
||||||
# mkdocs documentation
|
|
||||||
/site
|
|
||||||
|
|
||||||
# mypy
|
|
||||||
.mypy_cache/
|
|
||||||
.dmypy.json
|
|
||||||
dmypy.json
|
|
||||||
|
|
||||||
# Pyre type checker
|
|
||||||
.pyre/
|
|
||||||
|
|
||||||
# pytype static type analyzer
|
|
||||||
.pytype/
|
|
||||||
|
|
||||||
# Cython debug symbols
|
|
||||||
cython_debug/
|
|
||||||
|
|
||||||
# End of https://www.toptal.com/developers/gitignore/api/flask
|
|
||||||
|
|
||||||
*.db
|
|
||||||
*.rdb
|
|
||||||
|
|
||||||
/result
|
|
||||||
/.nixos-test-history
|
|
|
@ -1,8 +0,0 @@
|
||||||
# Default ignored files
|
|
||||||
/shelf/
|
|
||||||
/workspace.xml
|
|
||||||
# Editor-based HTTP Client requests
|
|
||||||
/httpRequests/
|
|
||||||
# Datasource local storage ignored files
|
|
||||||
/dataSources/
|
|
||||||
/dataSources.local.xml
|
|
|
@ -1,6 +0,0 @@
|
||||||
<component name="InspectionProjectProfileManager">
|
|
||||||
<settings>
|
|
||||||
<option name="USE_PROJECT_PROFILE" value="false" />
|
|
||||||
<version value="1.0" />
|
|
||||||
</settings>
|
|
||||||
</component>
|
|
|
@ -1,4 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
|
|
||||||
</project>
|
|
|
@ -1,8 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="ProjectModuleManager">
|
|
||||||
<modules>
|
|
||||||
<module fileurl="file://$PROJECT_DIR$/.idea/selfprivacy-rest-api.iml" filepath="$PROJECT_DIR$/.idea/selfprivacy-rest-api.iml" />
|
|
||||||
</modules>
|
|
||||||
</component>
|
|
||||||
</project>
|
|
|
@ -1,15 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<module type="PYTHON_MODULE" version="4">
|
|
||||||
<component name="NewModuleRootManager">
|
|
||||||
<content url="file://$MODULE_DIR$" />
|
|
||||||
<orderEntry type="jdk" jdkName="Python 3.9" jdkType="Python SDK" />
|
|
||||||
<orderEntry type="sourceFolder" forTests="false" />
|
|
||||||
</component>
|
|
||||||
<component name="PyDocumentationSettings">
|
|
||||||
<option name="format" value="PLAIN" />
|
|
||||||
<option name="myDocStringFormat" value="Plain" />
|
|
||||||
</component>
|
|
||||||
<component name="TestRunnerService">
|
|
||||||
<option name="PROJECT_TEST_RUNNER" value="py.test" />
|
|
||||||
</component>
|
|
||||||
</module>
|
|
|
@ -1,12 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<project version="4">
|
|
||||||
<component name="CommitMessageInspectionProfile">
|
|
||||||
<profile version="1.0">
|
|
||||||
<inspection_tool class="CommitFormat" enabled="true" level="WARNING" enabled_by_default="true" />
|
|
||||||
<inspection_tool class="CommitNamingConvention" enabled="true" level="WARNING" enabled_by_default="true" />
|
|
||||||
</profile>
|
|
||||||
</component>
|
|
||||||
<component name="VcsDirectoryMappings">
|
|
||||||
<mapping directory="" vcs="Git" />
|
|
||||||
</component>
|
|
||||||
</project>
|
|
|
@ -1,6 +0,0 @@
|
||||||
[MASTER]
|
|
||||||
init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()))"
|
|
||||||
extension-pkg-whitelist=pydantic
|
|
||||||
|
|
||||||
[FORMAT]
|
|
||||||
max-line-length=88
|
|
|
@ -1,19 +0,0 @@
|
||||||
{
|
|
||||||
// Use IntelliSense to learn about possible attributes.
|
|
||||||
// Hover to view descriptions of existing attributes.
|
|
||||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
|
||||||
"version": "0.2.0",
|
|
||||||
"configurations": [
|
|
||||||
{
|
|
||||||
"name": "Python: FastAPI",
|
|
||||||
"type": "python",
|
|
||||||
"request": "launch",
|
|
||||||
"module": "uvicorn",
|
|
||||||
"args": [
|
|
||||||
"selfprivacy_api.app:app"
|
|
||||||
],
|
|
||||||
"jinja": true,
|
|
||||||
"justMyCode": false
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
|
@ -1,12 +0,0 @@
|
||||||
{
|
|
||||||
"python.formatting.provider": "black",
|
|
||||||
"python.linting.pylintEnabled": true,
|
|
||||||
"python.linting.enabled": true,
|
|
||||||
"python.testing.pytestArgs": [
|
|
||||||
"tests"
|
|
||||||
],
|
|
||||||
"python.testing.unittestEnabled": false,
|
|
||||||
"python.testing.pytestEnabled": true,
|
|
||||||
"python.languageServer": "Pylance",
|
|
||||||
"python.analysis.typeCheckingMode": "basic"
|
|
||||||
}
|
|
|
@ -1,88 +0,0 @@
|
||||||
# SelfPrivacy API contributors guide
|
|
||||||
|
|
||||||
Instructions for [VScode](https://code.visualstudio.com) or [VScodium](https://github.com/VSCodium/vscodium) under Unix-like platform.
|
|
||||||
|
|
||||||
1. **To get started, create an account for yourself on the** [**SelfPrivacy Gitea**](https://git.selfprivacy.org/user/sign_up). Proceed to fork
|
|
||||||
the [repository](https://git.selfprivacy.org/SelfPrivacy/selfprivacy-rest-api), and clone it on your local computer:
|
|
||||||
|
|
||||||
```git clone https://git.selfprivacy.org/your_user_name/selfprivacy-rest-api```
|
|
||||||
|
|
||||||
2. **Install Nix**
|
|
||||||
|
|
||||||
```sh <(curl -L https://nixos.org/nix/install)```
|
|
||||||
|
|
||||||
For detailed installation information, please review and follow: [link](https://nixos.org/manual/nix/stable/installation/installing-binary.html#installing-a-binary-distribution).
|
|
||||||
|
|
||||||
3. **Change directory to the cloned repository and start a nix shell:**
|
|
||||||
|
|
||||||
```cd selfprivacy-rest-api && nix-shell```
|
|
||||||
|
|
||||||
Nix will install all of the necessary packages for development work, all further actions will take place only within nix-shell.
|
|
||||||
|
|
||||||
4. **Install these plugins for VScode/VScodium**
|
|
||||||
|
|
||||||
Required: ```ms-python.python```, ```ms-python.vscode-pylance```
|
|
||||||
|
|
||||||
Optional, but highly recommended: ```ms-python.black-formatter```, ```bbenoist.Nix```, ```ryanluker.vscode-coverage-gutters```
|
|
||||||
|
|
||||||
5. **Set the path to the python interpreter from the nix store.** To do this, execute the command:
|
|
||||||
|
|
||||||
```whereis python```
|
|
||||||
|
|
||||||
Copy the path that starts with ```/nix/store/``` and ends with ```env/bin/python```
|
|
||||||
|
|
||||||
```/nix/store/???-python3-3.9.??-env/bin/python```
|
|
||||||
|
|
||||||
Click on the python version selection in the lower right corner, and replace the path to the interpreter in the project with the one you copied from the terminal.
|
|
||||||
|
|
||||||
6. **Congratulations :) Now you can develop new changes and test the project locally in a Nix environment.**
|
|
||||||
|
|
||||||
## What do you need to know before starting development work?
|
|
||||||
- RestAPI is no longer utilized, the project has moved to [GraphQL](https://graphql.org), however, the API functionality still works on Rest
|
|
||||||
|
|
||||||
|
|
||||||
## What to do after making changes to the repository?
|
|
||||||
|
|
||||||
**Run unit tests** using ```pytest .```
|
|
||||||
Make sure that all tests pass successfully and the API works correctly. For convenience, you can use the built-in VScode interface.
|
|
||||||
|
|
||||||
How to review the percentage of code coverage? Execute the command:
|
|
||||||
|
|
||||||
```coverage run -m pytest && coverage xml && coverage report```
|
|
||||||
|
|
||||||
Next, use the recommended extension ```ryanluker.vscode-coverage-gutters```, navigate to one of the test files, and click the "watch" button on the bottom panel of VScode.
|
|
||||||
|
|
||||||
**Format (linting) code**, we use [black](https://pypi.org/project/black/) formatting, enter
|
|
||||||
```black .``` to automatically format files, or use the recommended extension.
|
|
||||||
|
|
||||||
**And please remember, we have adopted** [**commit naming convention**](https://www.conventionalcommits.org/en/v1.0.0/), follow the link for more information.
|
|
||||||
|
|
||||||
Please request a review from at least one of the other maintainers. If you are not sure who to request, request a review from SelfPrivacy/Devs team.
|
|
||||||
|
|
||||||
## Helpful links!
|
|
||||||
|
|
||||||
**SelfPrivacy Contributor chat :3**
|
|
||||||
|
|
||||||
- [**Telegram:** @selfprivacy_dev](https://t.me/selfprivacy_dev)
|
|
||||||
- [**Matrix:** #dev:selfprivacy.org](https://matrix.to/#/#dev:selfprivacy.org)
|
|
||||||
|
|
||||||
**Helpful material to review:**
|
|
||||||
|
|
||||||
- [GraphQL Query Language Documentation](https://graphql.org/)
|
|
||||||
- [Documentation Strawberry - python library for working with GraphQL](https://strawberry.rocks/docs/)
|
|
||||||
- [Nix Documentation](https://nixos.org/guides/ad-hoc-developer-environments.html)
|
|
||||||
|
|
||||||
### Track your time
|
|
||||||
|
|
||||||
If you are working on a task, please track your time and add it to the commit message. For example:
|
|
||||||
|
|
||||||
```
|
|
||||||
feat: add new feature
|
|
||||||
|
|
||||||
- did some work
|
|
||||||
- did some more work
|
|
||||||
|
|
||||||
fixes #4, spent @1h30m
|
|
||||||
```
|
|
||||||
|
|
||||||
[Timewarrior](https://timewarrior.net/) is a good tool for tracking time.
|
|
661
LICENSE
661
LICENSE
|
@ -1,661 +0,0 @@
|
||||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 19 November 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
Preamble
|
|
||||||
|
|
||||||
The GNU Affero General Public License is a free, copyleft license for
|
|
||||||
software and other kinds of works, specifically designed to ensure
|
|
||||||
cooperation with the community in the case of network server software.
|
|
||||||
|
|
||||||
The licenses for most software and other practical works are designed
|
|
||||||
to take away your freedom to share and change the works. By contrast,
|
|
||||||
our General Public Licenses are intended to guarantee your freedom to
|
|
||||||
share and change all versions of a program--to make sure it remains free
|
|
||||||
software for all its users.
|
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
|
||||||
have the freedom to distribute copies of free software (and charge for
|
|
||||||
them if you wish), that you receive source code or can get it if you
|
|
||||||
want it, that you can change the software or use pieces of it in new
|
|
||||||
free programs, and that you know you can do these things.
|
|
||||||
|
|
||||||
Developers that use our General Public Licenses protect your rights
|
|
||||||
with two steps: (1) assert copyright on the software, and (2) offer
|
|
||||||
you this License which gives you legal permission to copy, distribute
|
|
||||||
and/or modify the software.
|
|
||||||
|
|
||||||
A secondary benefit of defending all users' freedom is that
|
|
||||||
improvements made in alternate versions of the program, if they
|
|
||||||
receive widespread use, become available for other developers to
|
|
||||||
incorporate. Many developers of free software are heartened and
|
|
||||||
encouraged by the resulting cooperation. However, in the case of
|
|
||||||
software used on network servers, this result may fail to come about.
|
|
||||||
The GNU General Public License permits making a modified version and
|
|
||||||
letting the public access it on a server without ever releasing its
|
|
||||||
source code to the public.
|
|
||||||
|
|
||||||
The GNU Affero General Public License is designed specifically to
|
|
||||||
ensure that, in such cases, the modified source code becomes available
|
|
||||||
to the community. It requires the operator of a network server to
|
|
||||||
provide the source code of the modified version running there to the
|
|
||||||
users of that server. Therefore, public use of a modified version, on
|
|
||||||
a publicly accessible server, gives the public access to the source
|
|
||||||
code of the modified version.
|
|
||||||
|
|
||||||
An older license, called the Affero General Public License and
|
|
||||||
published by Affero, was designed to accomplish similar goals. This is
|
|
||||||
a different license, not a version of the Affero GPL, but Affero has
|
|
||||||
released a new version of the Affero GPL which permits relicensing under
|
|
||||||
this license.
|
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
|
||||||
modification follow.
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
0. Definitions.
|
|
||||||
|
|
||||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
|
||||||
|
|
||||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
|
||||||
works, such as semiconductor masks.
|
|
||||||
|
|
||||||
"The Program" refers to any copyrightable work licensed under this
|
|
||||||
License. Each licensee is addressed as "you". "Licensees" and
|
|
||||||
"recipients" may be individuals or organizations.
|
|
||||||
|
|
||||||
To "modify" a work means to copy from or adapt all or part of the work
|
|
||||||
in a fashion requiring copyright permission, other than the making of an
|
|
||||||
exact copy. The resulting work is called a "modified version" of the
|
|
||||||
earlier work or a work "based on" the earlier work.
|
|
||||||
|
|
||||||
A "covered work" means either the unmodified Program or a work based
|
|
||||||
on the Program.
|
|
||||||
|
|
||||||
To "propagate" a work means to do anything with it that, without
|
|
||||||
permission, would make you directly or secondarily liable for
|
|
||||||
infringement under applicable copyright law, except executing it on a
|
|
||||||
computer or modifying a private copy. Propagation includes copying,
|
|
||||||
distribution (with or without modification), making available to the
|
|
||||||
public, and in some countries other activities as well.
|
|
||||||
|
|
||||||
To "convey" a work means any kind of propagation that enables other
|
|
||||||
parties to make or receive copies. Mere interaction with a user through
|
|
||||||
a computer network, with no transfer of a copy, is not conveying.
|
|
||||||
|
|
||||||
An interactive user interface displays "Appropriate Legal Notices"
|
|
||||||
to the extent that it includes a convenient and prominently visible
|
|
||||||
feature that (1) displays an appropriate copyright notice, and (2)
|
|
||||||
tells the user that there is no warranty for the work (except to the
|
|
||||||
extent that warranties are provided), that licensees may convey the
|
|
||||||
work under this License, and how to view a copy of this License. If
|
|
||||||
the interface presents a list of user commands or options, such as a
|
|
||||||
menu, a prominent item in the list meets this criterion.
|
|
||||||
|
|
||||||
1. Source Code.
|
|
||||||
|
|
||||||
The "source code" for a work means the preferred form of the work
|
|
||||||
for making modifications to it. "Object code" means any non-source
|
|
||||||
form of a work.
|
|
||||||
|
|
||||||
A "Standard Interface" means an interface that either is an official
|
|
||||||
standard defined by a recognized standards body, or, in the case of
|
|
||||||
interfaces specified for a particular programming language, one that
|
|
||||||
is widely used among developers working in that language.
|
|
||||||
|
|
||||||
The "System Libraries" of an executable work include anything, other
|
|
||||||
than the work as a whole, that (a) is included in the normal form of
|
|
||||||
packaging a Major Component, but which is not part of that Major
|
|
||||||
Component, and (b) serves only to enable use of the work with that
|
|
||||||
Major Component, or to implement a Standard Interface for which an
|
|
||||||
implementation is available to the public in source code form. A
|
|
||||||
"Major Component", in this context, means a major essential component
|
|
||||||
(kernel, window system, and so on) of the specific operating system
|
|
||||||
(if any) on which the executable work runs, or a compiler used to
|
|
||||||
produce the work, or an object code interpreter used to run it.
|
|
||||||
|
|
||||||
The "Corresponding Source" for a work in object code form means all
|
|
||||||
the source code needed to generate, install, and (for an executable
|
|
||||||
work) run the object code and to modify the work, including scripts to
|
|
||||||
control those activities. However, it does not include the work's
|
|
||||||
System Libraries, or general-purpose tools or generally available free
|
|
||||||
programs which are used unmodified in performing those activities but
|
|
||||||
which are not part of the work. For example, Corresponding Source
|
|
||||||
includes interface definition files associated with source files for
|
|
||||||
the work, and the source code for shared libraries and dynamically
|
|
||||||
linked subprograms that the work is specifically designed to require,
|
|
||||||
such as by intimate data communication or control flow between those
|
|
||||||
subprograms and other parts of the work.
|
|
||||||
|
|
||||||
The Corresponding Source need not include anything that users
|
|
||||||
can regenerate automatically from other parts of the Corresponding
|
|
||||||
Source.
|
|
||||||
|
|
||||||
The Corresponding Source for a work in source code form is that
|
|
||||||
same work.
|
|
||||||
|
|
||||||
2. Basic Permissions.
|
|
||||||
|
|
||||||
All rights granted under this License are granted for the term of
|
|
||||||
copyright on the Program, and are irrevocable provided the stated
|
|
||||||
conditions are met. This License explicitly affirms your unlimited
|
|
||||||
permission to run the unmodified Program. The output from running a
|
|
||||||
covered work is covered by this License only if the output, given its
|
|
||||||
content, constitutes a covered work. This License acknowledges your
|
|
||||||
rights of fair use or other equivalent, as provided by copyright law.
|
|
||||||
|
|
||||||
You may make, run and propagate covered works that you do not
|
|
||||||
convey, without conditions so long as your license otherwise remains
|
|
||||||
in force. You may convey covered works to others for the sole purpose
|
|
||||||
of having them make modifications exclusively for you, or provide you
|
|
||||||
with facilities for running those works, provided that you comply with
|
|
||||||
the terms of this License in conveying all material for which you do
|
|
||||||
not control copyright. Those thus making or running the covered works
|
|
||||||
for you must do so exclusively on your behalf, under your direction
|
|
||||||
and control, on terms that prohibit them from making any copies of
|
|
||||||
your copyrighted material outside their relationship with you.
|
|
||||||
|
|
||||||
Conveying under any other circumstances is permitted solely under
|
|
||||||
the conditions stated below. Sublicensing is not allowed; section 10
|
|
||||||
makes it unnecessary.
|
|
||||||
|
|
||||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
|
||||||
|
|
||||||
No covered work shall be deemed part of an effective technological
|
|
||||||
measure under any applicable law fulfilling obligations under article
|
|
||||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
|
||||||
similar laws prohibiting or restricting circumvention of such
|
|
||||||
measures.
|
|
||||||
|
|
||||||
When you convey a covered work, you waive any legal power to forbid
|
|
||||||
circumvention of technological measures to the extent such circumvention
|
|
||||||
is effected by exercising rights under this License with respect to
|
|
||||||
the covered work, and you disclaim any intention to limit operation or
|
|
||||||
modification of the work as a means of enforcing, against the work's
|
|
||||||
users, your or third parties' legal rights to forbid circumvention of
|
|
||||||
technological measures.
|
|
||||||
|
|
||||||
4. Conveying Verbatim Copies.
|
|
||||||
|
|
||||||
You may convey verbatim copies of the Program's source code as you
|
|
||||||
receive it, in any medium, provided that you conspicuously and
|
|
||||||
appropriately publish on each copy an appropriate copyright notice;
|
|
||||||
keep intact all notices stating that this License and any
|
|
||||||
non-permissive terms added in accord with section 7 apply to the code;
|
|
||||||
keep intact all notices of the absence of any warranty; and give all
|
|
||||||
recipients a copy of this License along with the Program.
|
|
||||||
|
|
||||||
You may charge any price or no price for each copy that you convey,
|
|
||||||
and you may offer support or warranty protection for a fee.
|
|
||||||
|
|
||||||
5. Conveying Modified Source Versions.
|
|
||||||
|
|
||||||
You may convey a work based on the Program, or the modifications to
|
|
||||||
produce it from the Program, in the form of source code under the
|
|
||||||
terms of section 4, provided that you also meet all of these conditions:
|
|
||||||
|
|
||||||
a) The work must carry prominent notices stating that you modified
|
|
||||||
it, and giving a relevant date.
|
|
||||||
|
|
||||||
b) The work must carry prominent notices stating that it is
|
|
||||||
released under this License and any conditions added under section
|
|
||||||
7. This requirement modifies the requirement in section 4 to
|
|
||||||
"keep intact all notices".
|
|
||||||
|
|
||||||
c) You must license the entire work, as a whole, under this
|
|
||||||
License to anyone who comes into possession of a copy. This
|
|
||||||
License will therefore apply, along with any applicable section 7
|
|
||||||
additional terms, to the whole of the work, and all its parts,
|
|
||||||
regardless of how they are packaged. This License gives no
|
|
||||||
permission to license the work in any other way, but it does not
|
|
||||||
invalidate such permission if you have separately received it.
|
|
||||||
|
|
||||||
d) If the work has interactive user interfaces, each must display
|
|
||||||
Appropriate Legal Notices; however, if the Program has interactive
|
|
||||||
interfaces that do not display Appropriate Legal Notices, your
|
|
||||||
work need not make them do so.
|
|
||||||
|
|
||||||
A compilation of a covered work with other separate and independent
|
|
||||||
works, which are not by their nature extensions of the covered work,
|
|
||||||
and which are not combined with it such as to form a larger program,
|
|
||||||
in or on a volume of a storage or distribution medium, is called an
|
|
||||||
"aggregate" if the compilation and its resulting copyright are not
|
|
||||||
used to limit the access or legal rights of the compilation's users
|
|
||||||
beyond what the individual works permit. Inclusion of a covered work
|
|
||||||
in an aggregate does not cause this License to apply to the other
|
|
||||||
parts of the aggregate.
|
|
||||||
|
|
||||||
6. Conveying Non-Source Forms.
|
|
||||||
|
|
||||||
You may convey a covered work in object code form under the terms
|
|
||||||
of sections 4 and 5, provided that you also convey the
|
|
||||||
machine-readable Corresponding Source under the terms of this License,
|
|
||||||
in one of these ways:
|
|
||||||
|
|
||||||
a) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by the
|
|
||||||
Corresponding Source fixed on a durable physical medium
|
|
||||||
customarily used for software interchange.
|
|
||||||
|
|
||||||
b) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by a
|
|
||||||
written offer, valid for at least three years and valid for as
|
|
||||||
long as you offer spare parts or customer support for that product
|
|
||||||
model, to give anyone who possesses the object code either (1) a
|
|
||||||
copy of the Corresponding Source for all the software in the
|
|
||||||
product that is covered by this License, on a durable physical
|
|
||||||
medium customarily used for software interchange, for a price no
|
|
||||||
more than your reasonable cost of physically performing this
|
|
||||||
conveying of source, or (2) access to copy the
|
|
||||||
Corresponding Source from a network server at no charge.
|
|
||||||
|
|
||||||
c) Convey individual copies of the object code with a copy of the
|
|
||||||
written offer to provide the Corresponding Source. This
|
|
||||||
alternative is allowed only occasionally and noncommercially, and
|
|
||||||
only if you received the object code with such an offer, in accord
|
|
||||||
with subsection 6b.
|
|
||||||
|
|
||||||
d) Convey the object code by offering access from a designated
|
|
||||||
place (gratis or for a charge), and offer equivalent access to the
|
|
||||||
Corresponding Source in the same way through the same place at no
|
|
||||||
further charge. You need not require recipients to copy the
|
|
||||||
Corresponding Source along with the object code. If the place to
|
|
||||||
copy the object code is a network server, the Corresponding Source
|
|
||||||
may be on a different server (operated by you or a third party)
|
|
||||||
that supports equivalent copying facilities, provided you maintain
|
|
||||||
clear directions next to the object code saying where to find the
|
|
||||||
Corresponding Source. Regardless of what server hosts the
|
|
||||||
Corresponding Source, you remain obligated to ensure that it is
|
|
||||||
available for as long as needed to satisfy these requirements.
|
|
||||||
|
|
||||||
e) Convey the object code using peer-to-peer transmission, provided
|
|
||||||
you inform other peers where the object code and Corresponding
|
|
||||||
Source of the work are being offered to the general public at no
|
|
||||||
charge under subsection 6d.
|
|
||||||
|
|
||||||
A separable portion of the object code, whose source code is excluded
|
|
||||||
from the Corresponding Source as a System Library, need not be
|
|
||||||
included in conveying the object code work.
|
|
||||||
|
|
||||||
A "User Product" is either (1) a "consumer product", which means any
|
|
||||||
tangible personal property which is normally used for personal, family,
|
|
||||||
or household purposes, or (2) anything designed or sold for incorporation
|
|
||||||
into a dwelling. In determining whether a product is a consumer product,
|
|
||||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
|
||||||
product received by a particular user, "normally used" refers to a
|
|
||||||
typical or common use of that class of product, regardless of the status
|
|
||||||
of the particular user or of the way in which the particular user
|
|
||||||
actually uses, or expects or is expected to use, the product. A product
|
|
||||||
is a consumer product regardless of whether the product has substantial
|
|
||||||
commercial, industrial or non-consumer uses, unless such uses represent
|
|
||||||
the only significant mode of use of the product.
|
|
||||||
|
|
||||||
"Installation Information" for a User Product means any methods,
|
|
||||||
procedures, authorization keys, or other information required to install
|
|
||||||
and execute modified versions of a covered work in that User Product from
|
|
||||||
a modified version of its Corresponding Source. The information must
|
|
||||||
suffice to ensure that the continued functioning of the modified object
|
|
||||||
code is in no case prevented or interfered with solely because
|
|
||||||
modification has been made.
|
|
||||||
|
|
||||||
If you convey an object code work under this section in, or with, or
|
|
||||||
specifically for use in, a User Product, and the conveying occurs as
|
|
||||||
part of a transaction in which the right of possession and use of the
|
|
||||||
User Product is transferred to the recipient in perpetuity or for a
|
|
||||||
fixed term (regardless of how the transaction is characterized), the
|
|
||||||
Corresponding Source conveyed under this section must be accompanied
|
|
||||||
by the Installation Information. But this requirement does not apply
|
|
||||||
if neither you nor any third party retains the ability to install
|
|
||||||
modified object code on the User Product (for example, the work has
|
|
||||||
been installed in ROM).
|
|
||||||
|
|
||||||
The requirement to provide Installation Information does not include a
|
|
||||||
requirement to continue to provide support service, warranty, or updates
|
|
||||||
for a work that has been modified or installed by the recipient, or for
|
|
||||||
the User Product in which it has been modified or installed. Access to a
|
|
||||||
network may be denied when the modification itself materially and
|
|
||||||
adversely affects the operation of the network or violates the rules and
|
|
||||||
protocols for communication across the network.
|
|
||||||
|
|
||||||
Corresponding Source conveyed, and Installation Information provided,
|
|
||||||
in accord with this section must be in a format that is publicly
|
|
||||||
documented (and with an implementation available to the public in
|
|
||||||
source code form), and must require no special password or key for
|
|
||||||
unpacking, reading or copying.
|
|
||||||
|
|
||||||
7. Additional Terms.
|
|
||||||
|
|
||||||
"Additional permissions" are terms that supplement the terms of this
|
|
||||||
License by making exceptions from one or more of its conditions.
|
|
||||||
Additional permissions that are applicable to the entire Program shall
|
|
||||||
be treated as though they were included in this License, to the extent
|
|
||||||
that they are valid under applicable law. If additional permissions
|
|
||||||
apply only to part of the Program, that part may be used separately
|
|
||||||
under those permissions, but the entire Program remains governed by
|
|
||||||
this License without regard to the additional permissions.
|
|
||||||
|
|
||||||
When you convey a copy of a covered work, you may at your option
|
|
||||||
remove any additional permissions from that copy, or from any part of
|
|
||||||
it. (Additional permissions may be written to require their own
|
|
||||||
removal in certain cases when you modify the work.) You may place
|
|
||||||
additional permissions on material, added by you to a covered work,
|
|
||||||
for which you have or can give appropriate copyright permission.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, for material you
|
|
||||||
add to a covered work, you may (if authorized by the copyright holders of
|
|
||||||
that material) supplement the terms of this License with terms:
|
|
||||||
|
|
||||||
a) Disclaiming warranty or limiting liability differently from the
|
|
||||||
terms of sections 15 and 16 of this License; or
|
|
||||||
|
|
||||||
b) Requiring preservation of specified reasonable legal notices or
|
|
||||||
author attributions in that material or in the Appropriate Legal
|
|
||||||
Notices displayed by works containing it; or
|
|
||||||
|
|
||||||
c) Prohibiting misrepresentation of the origin of that material, or
|
|
||||||
requiring that modified versions of such material be marked in
|
|
||||||
reasonable ways as different from the original version; or
|
|
||||||
|
|
||||||
d) Limiting the use for publicity purposes of names of licensors or
|
|
||||||
authors of the material; or
|
|
||||||
|
|
||||||
e) Declining to grant rights under trademark law for use of some
|
|
||||||
trade names, trademarks, or service marks; or
|
|
||||||
|
|
||||||
f) Requiring indemnification of licensors and authors of that
|
|
||||||
material by anyone who conveys the material (or modified versions of
|
|
||||||
it) with contractual assumptions of liability to the recipient, for
|
|
||||||
any liability that these contractual assumptions directly impose on
|
|
||||||
those licensors and authors.
|
|
||||||
|
|
||||||
All other non-permissive additional terms are considered "further
|
|
||||||
restrictions" within the meaning of section 10. If the Program as you
|
|
||||||
received it, or any part of it, contains a notice stating that it is
|
|
||||||
governed by this License along with a term that is a further
|
|
||||||
restriction, you may remove that term. If a license document contains
|
|
||||||
a further restriction but permits relicensing or conveying under this
|
|
||||||
License, you may add to a covered work material governed by the terms
|
|
||||||
of that license document, provided that the further restriction does
|
|
||||||
not survive such relicensing or conveying.
|
|
||||||
|
|
||||||
If you add terms to a covered work in accord with this section, you
|
|
||||||
must place, in the relevant source files, a statement of the
|
|
||||||
additional terms that apply to those files, or a notice indicating
|
|
||||||
where to find the applicable terms.
|
|
||||||
|
|
||||||
Additional terms, permissive or non-permissive, may be stated in the
|
|
||||||
form of a separately written license, or stated as exceptions;
|
|
||||||
the above requirements apply either way.
|
|
||||||
|
|
||||||
8. Termination.
|
|
||||||
|
|
||||||
You may not propagate or modify a covered work except as expressly
|
|
||||||
provided under this License. Any attempt otherwise to propagate or
|
|
||||||
modify it is void, and will automatically terminate your rights under
|
|
||||||
this License (including any patent licenses granted under the third
|
|
||||||
paragraph of section 11).
|
|
||||||
|
|
||||||
However, if you cease all violation of this License, then your
|
|
||||||
license from a particular copyright holder is reinstated (a)
|
|
||||||
provisionally, unless and until the copyright holder explicitly and
|
|
||||||
finally terminates your license, and (b) permanently, if the copyright
|
|
||||||
holder fails to notify you of the violation by some reasonable means
|
|
||||||
prior to 60 days after the cessation.
|
|
||||||
|
|
||||||
Moreover, your license from a particular copyright holder is
|
|
||||||
reinstated permanently if the copyright holder notifies you of the
|
|
||||||
violation by some reasonable means, this is the first time you have
|
|
||||||
received notice of violation of this License (for any work) from that
|
|
||||||
copyright holder, and you cure the violation prior to 30 days after
|
|
||||||
your receipt of the notice.
|
|
||||||
|
|
||||||
Termination of your rights under this section does not terminate the
|
|
||||||
licenses of parties who have received copies or rights from you under
|
|
||||||
this License. If your rights have been terminated and not permanently
|
|
||||||
reinstated, you do not qualify to receive new licenses for the same
|
|
||||||
material under section 10.
|
|
||||||
|
|
||||||
9. Acceptance Not Required for Having Copies.
|
|
||||||
|
|
||||||
You are not required to accept this License in order to receive or
|
|
||||||
run a copy of the Program. Ancillary propagation of a covered work
|
|
||||||
occurring solely as a consequence of using peer-to-peer transmission
|
|
||||||
to receive a copy likewise does not require acceptance. However,
|
|
||||||
nothing other than this License grants you permission to propagate or
|
|
||||||
modify any covered work. These actions infringe copyright if you do
|
|
||||||
not accept this License. Therefore, by modifying or propagating a
|
|
||||||
covered work, you indicate your acceptance of this License to do so.
|
|
||||||
|
|
||||||
10. Automatic Licensing of Downstream Recipients.
|
|
||||||
|
|
||||||
Each time you convey a covered work, the recipient automatically
|
|
||||||
receives a license from the original licensors, to run, modify and
|
|
||||||
propagate that work, subject to this License. You are not responsible
|
|
||||||
for enforcing compliance by third parties with this License.
|
|
||||||
|
|
||||||
An "entity transaction" is a transaction transferring control of an
|
|
||||||
organization, or substantially all assets of one, or subdividing an
|
|
||||||
organization, or merging organizations. If propagation of a covered
|
|
||||||
work results from an entity transaction, each party to that
|
|
||||||
transaction who receives a copy of the work also receives whatever
|
|
||||||
licenses to the work the party's predecessor in interest had or could
|
|
||||||
give under the previous paragraph, plus a right to possession of the
|
|
||||||
Corresponding Source of the work from the predecessor in interest, if
|
|
||||||
the predecessor has it or can get it with reasonable efforts.
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, if you modify the
|
|
||||||
Program, your modified version must prominently offer all users
|
|
||||||
interacting with it remotely through a computer network (if your version
|
|
||||||
supports such interaction) an opportunity to receive the Corresponding
|
|
||||||
Source of your version by providing access to the Corresponding Source
|
|
||||||
from a network server at no charge, through some standard or customary
|
|
||||||
means of facilitating copying of software. This Corresponding Source
|
|
||||||
shall include the Corresponding Source for any work covered by version 3
|
|
||||||
of the GNU General Public License that is incorporated pursuant to the
|
|
||||||
following paragraph.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the work with which it is combined will remain governed by version
|
|
||||||
3 of the GNU General Public License.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU Affero General Public License from time to time. Such new versions
|
|
||||||
will be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU Affero General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU Affero General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU Affero General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU Affero General Public License as published
|
|
||||||
by the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU Affero General Public License
|
|
||||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If your software can interact with users remotely through a computer
|
|
||||||
network, you should also make sure that it provides a way for users to
|
|
||||||
get its source. For example, if your program is a web application, its
|
|
||||||
interface could display a "Source" link that leads users to an archive
|
|
||||||
of the code. There are many ways you could offer source, and different
|
|
||||||
solutions will be better for different programs; see section 13 for the
|
|
||||||
specific requirements.
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
|
||||||
<https://www.gnu.org/licenses/>.
|
|
92
README.md
92
README.md
|
@ -1,92 +0,0 @@
|
||||||
# SelfPrivacy GraphQL API which allows app to control your server
|
|
||||||
|
|
||||||
![CI status](https://ci.selfprivacy.org/api/badges/SelfPrivacy/selfprivacy-rest-api/status.svg)
|
|
||||||
|
|
||||||
## Build
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ nix build
|
|
||||||
```
|
|
||||||
|
|
||||||
In case of successful build, you should get the `./result` symlink to a folder (in `/nix/store`) with build contents.
|
|
||||||
|
|
||||||
## Develop
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ nix develop
|
|
||||||
[SP devshell:/dir/selfprivacy-rest-api]$ python
|
|
||||||
Python 3.10.13 (main, Aug 24 2023, 12:59:26) [GCC 12.3.0] on linux
|
|
||||||
Type "help", "copyright", "credits" or "license" for more information.
|
|
||||||
(ins)>>>
|
|
||||||
```
|
|
||||||
|
|
||||||
If you don't have experimental flakes enabled, you can use the following command:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ nix --extra-experimental-features nix-command --extra-experimental-features flakes develop
|
|
||||||
```
|
|
||||||
|
|
||||||
## Testing
|
|
||||||
|
|
||||||
Run the test suite by running coverage with pytest inside an ephemeral NixOS VM with redis service enabled:
|
|
||||||
```console
|
|
||||||
$ nix flake check -L
|
|
||||||
```
|
|
||||||
|
|
||||||
Run the same test suite, but additionally create `./result/coverage.xml` in the current directory:
|
|
||||||
```console
|
|
||||||
$ nix build .#checks.x86_64-linux.default -L
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, just print the path to `/nix/store/...coverage.xml` without creating any files in the current directory:
|
|
||||||
```console
|
|
||||||
$ nix build .#checks.x86_64-linux.default -L --print-out-paths --no-link
|
|
||||||
```
|
|
||||||
|
|
||||||
Run the same test suite with arbitrary pytest options:
|
|
||||||
```console
|
|
||||||
$ pytest-vm.sh # specify pytest options here, e.g. `--last-failed`
|
|
||||||
```
|
|
||||||
When running using the script, pytest cache is preserved between runs in `.pytest_cache` folder.
|
|
||||||
NixOS VM state temporary resides in `${TMPDIR:=/tmp}/nixos-vm-tmp-dir/vm-state-machine` during the test.
|
|
||||||
Git workdir directory is shared read-write with VM via `.nixos-vm-tmp-dir/shared-xchg` symlink. VM accesses workdir contents via `/tmp/shared` mount point and `/root/source` symlink.
|
|
||||||
|
|
||||||
Launch VM and execute commands manually either in Linux console (user `root`) or using python NixOS tests driver API (refer to [NixOS documentation](https://nixos.org/manual/nixos/stable/#ssec-machine-objects)):
|
|
||||||
```console
|
|
||||||
$ nix run .#checks.x86_64-linux.default.driverInteractive
|
|
||||||
```
|
|
||||||
|
|
||||||
You can add `--keep-vm-state` in order to keep VM state between runs:
|
|
||||||
```console
|
|
||||||
$ TMPDIR=".nixos-vm-tmp-dir" nix run .#checks.x86_64-linux.default.driverInteractive --keep-vm-state
|
|
||||||
```
|
|
||||||
|
|
||||||
Option `-L`/`--print-build-logs` is optional for all nix commands. It tells nix to print each log line one after another instead of overwriting a single one.
|
|
||||||
|
|
||||||
## Dependencies and Dependant Modules
|
|
||||||
|
|
||||||
This flake depends on a single Nix flake input - nixpkgs repository. nixpkgs repository is used for all software packages used to build, run API service, tests, etc.
|
|
||||||
|
|
||||||
In order to synchronize nixpkgs input with the same from selfprivacy-nixos-config repository, use this command:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ nix flake lock --override-input nixpkgs nixpkgs --inputs-from git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=BRANCH
|
|
||||||
```
|
|
||||||
|
|
||||||
Replace BRANCH with the branch name of selfprivacy-nixos-config repository you want to sync with. During development nixpkgs input update might be required in both selfprivacy-rest-api and selfprivacy-nixos-config repositories simultaneously. So, a new feature branch might be temporarily used until selfprivacy-nixos-config gets the feature branch merged.
|
|
||||||
|
|
||||||
Show current flake inputs (e.g. nixpkgs):
|
|
||||||
```console
|
|
||||||
$ nix flake metadata
|
|
||||||
```
|
|
||||||
|
|
||||||
Show selfprivacy-nixos-config Nix flake inputs (including nixpkgs):
|
|
||||||
```console
|
|
||||||
$ nix flake metadata git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=BRANCH
|
|
||||||
```
|
|
||||||
|
|
||||||
Nix code for NixOS service module for API is located in NixOS configuration repository.
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
Sometimes commands inside `nix develop` refuse to work properly if the calling shell lacks `LANG` environment variable. Try to set it before entering `nix develop`.
|
|
29
default.nix
29
default.nix
|
@ -1,29 +0,0 @@
|
||||||
{ pythonPackages, rev ? "local" }:
|
|
||||||
|
|
||||||
pythonPackages.buildPythonPackage rec {
|
|
||||||
pname = "selfprivacy-graphql-api";
|
|
||||||
version = rev;
|
|
||||||
src = builtins.filterSource (p: t: p != ".git" && t != "symlink") ./.;
|
|
||||||
propagatedBuildInputs = with pythonPackages; [
|
|
||||||
fastapi
|
|
||||||
gevent
|
|
||||||
huey
|
|
||||||
mnemonic
|
|
||||||
portalocker
|
|
||||||
psutil
|
|
||||||
pydantic
|
|
||||||
pytz
|
|
||||||
redis
|
|
||||||
setuptools
|
|
||||||
strawberry-graphql
|
|
||||||
typing-extensions
|
|
||||||
uvicorn
|
|
||||||
];
|
|
||||||
pythonImportsCheck = [ "selfprivacy_api" ];
|
|
||||||
doCheck = false;
|
|
||||||
meta = {
|
|
||||||
description = ''
|
|
||||||
SelfPrivacy Server Management API
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
}
|
|
26
flake.lock
26
flake.lock
|
@ -1,26 +0,0 @@
|
||||||
{
|
|
||||||
"nodes": {
|
|
||||||
"nixpkgs": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1709677081,
|
|
||||||
"narHash": "sha256-tix36Y7u0rkn6mTm0lA45b45oab2cFLqAzDbJxeXS+c=",
|
|
||||||
"owner": "nixos",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "880992dcc006a5e00dd0591446fdf723e6a51a64",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nixos",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": "nixpkgs"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": "root",
|
|
||||||
"version": 7
|
|
||||||
}
|
|
162
flake.nix
162
flake.nix
|
@ -1,162 +0,0 @@
|
||||||
{
|
|
||||||
description = "SelfPrivacy API flake";
|
|
||||||
|
|
||||||
inputs.nixpkgs.url = "github:nixos/nixpkgs";
|
|
||||||
|
|
||||||
outputs = { self, nixpkgs, ... }:
|
|
||||||
let
|
|
||||||
system = "x86_64-linux";
|
|
||||||
pkgs = nixpkgs.legacyPackages.${system};
|
|
||||||
selfprivacy-graphql-api = pkgs.callPackage ./default.nix {
|
|
||||||
pythonPackages = pkgs.python310Packages;
|
|
||||||
rev = self.shortRev or self.dirtyShortRev or "dirty";
|
|
||||||
};
|
|
||||||
python = self.packages.${system}.default.pythonModule;
|
|
||||||
python-env =
|
|
||||||
python.withPackages (ps:
|
|
||||||
self.packages.${system}.default.propagatedBuildInputs ++ (with ps; [
|
|
||||||
coverage
|
|
||||||
pytest
|
|
||||||
pytest-datadir
|
|
||||||
pytest-mock
|
|
||||||
pytest-subprocess
|
|
||||||
black
|
|
||||||
mypy
|
|
||||||
pylsp-mypy
|
|
||||||
python-lsp-black
|
|
||||||
python-lsp-server
|
|
||||||
pyflakes
|
|
||||||
typer # for strawberry
|
|
||||||
types-redis # for mypy
|
|
||||||
] ++ strawberry-graphql.optional-dependencies.cli));
|
|
||||||
|
|
||||||
vmtest-src-dir = "/root/source";
|
|
||||||
shellMOTD = ''
|
|
||||||
Welcome to SP API development shell!
|
|
||||||
|
|
||||||
[formatters]
|
|
||||||
|
|
||||||
black
|
|
||||||
nixpkgs-fmt
|
|
||||||
|
|
||||||
[testing in NixOS VM]
|
|
||||||
|
|
||||||
nixos-test-driver - run an interactive NixOS VM with all dependencies included and 2 disk volumes
|
|
||||||
pytest-vm - run pytest in an ephemeral NixOS VM with Redis, accepting pytest arguments
|
|
||||||
'';
|
|
||||||
in
|
|
||||||
{
|
|
||||||
# see https://github.com/NixOS/nixpkgs/blob/66a9817cec77098cfdcbb9ad82dbb92651987a84/nixos/lib/test-driver/test_driver/machine.py#L359
|
|
||||||
packages.${system} = {
|
|
||||||
default = selfprivacy-graphql-api;
|
|
||||||
pytest-vm = pkgs.writeShellScriptBin "pytest-vm" ''
|
|
||||||
set -o errexit
|
|
||||||
set -o nounset
|
|
||||||
set -o xtrace
|
|
||||||
|
|
||||||
# see https://github.com/NixOS/nixpkgs/blob/66a9817cec77098cfdcbb9ad82dbb92651987a84/nixos/lib/test-driver/test_driver/machine.py#L359
|
|
||||||
export TMPDIR=''${TMPDIR:=/tmp}/nixos-vm-tmp-dir
|
|
||||||
readonly NIXOS_VM_SHARED_DIR_HOST="$TMPDIR/shared-xchg"
|
|
||||||
readonly NIXOS_VM_SHARED_DIR_GUEST="/tmp/shared"
|
|
||||||
|
|
||||||
mkdir -p "$TMPDIR"
|
|
||||||
ln -sfv "$PWD" -T "$NIXOS_VM_SHARED_DIR_HOST"
|
|
||||||
|
|
||||||
SCRIPT=$(cat <<EOF
|
|
||||||
start_all()
|
|
||||||
machine.succeed("ln -sf $NIXOS_VM_SHARED_DIR_GUEST -T ${vmtest-src-dir} >&2")
|
|
||||||
machine.succeed("cd ${vmtest-src-dir} && coverage run -m pytest -v $@ >&2")
|
|
||||||
machine.succeed("cd ${vmtest-src-dir} && coverage report >&2")
|
|
||||||
EOF
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ -f "/etc/arch-release" ]; then
|
|
||||||
${self.checks.${system}.default.driverInteractive}/bin/nixos-test-driver --no-interactive <(printf "%s" "$SCRIPT")
|
|
||||||
else
|
|
||||||
${self.checks.${system}.default.driver}/bin/nixos-test-driver -- <(printf "%s" "$SCRIPT")
|
|
||||||
fi
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
nixosModules.default =
|
|
||||||
import ./nixos/module.nix self.packages.${system}.default;
|
|
||||||
devShells.${system}.default = pkgs.mkShellNoCC {
|
|
||||||
name = "SP API dev shell";
|
|
||||||
packages = with pkgs; [
|
|
||||||
nixpkgs-fmt
|
|
||||||
rclone
|
|
||||||
redis
|
|
||||||
restic
|
|
||||||
self.packages.${system}.pytest-vm
|
|
||||||
# FIXME consider loading this explicitly only after ArchLinux issue is solved
|
|
||||||
self.checks.x86_64-linux.default.driverInteractive
|
|
||||||
# the target API application python environment
|
|
||||||
python-env
|
|
||||||
];
|
|
||||||
shellHook = ''
|
|
||||||
# envs set with export and as attributes are treated differently.
|
|
||||||
# for example. printenv <Name> will not fetch the value of an attribute.
|
|
||||||
export TEST_MODE="true"
|
|
||||||
|
|
||||||
# more tips for bash-completion to work on non-NixOS:
|
|
||||||
# https://discourse.nixos.org/t/whats-the-nix-way-of-bash-completion-for-packages/20209/16?u=alexoundos
|
|
||||||
# Load installed profiles
|
|
||||||
for file in "/etc/profile.d/"*.sh; do
|
|
||||||
# If that folder doesn't exist, bash loves to return the whole glob
|
|
||||||
[[ -f "$file" ]] && source "$file"
|
|
||||||
done
|
|
||||||
|
|
||||||
printf "%s" "${shellMOTD}"
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
checks.${system} = {
|
|
||||||
fmt-check = pkgs.runCommandLocal "sp-api-fmt-check"
|
|
||||||
{ nativeBuildInputs = [ pkgs.black ]; }
|
|
||||||
"black --check ${self.outPath} > $out";
|
|
||||||
default =
|
|
||||||
pkgs.testers.runNixOSTest {
|
|
||||||
name = "default";
|
|
||||||
nodes.machine = { lib, pkgs, ... }: {
|
|
||||||
# 2 additional disks (1024 MiB and 200 MiB) with empty ext4 FS
|
|
||||||
virtualisation.emptyDiskImages = [ 1024 200 ];
|
|
||||||
virtualisation.fileSystems."/volumes/vdb" = {
|
|
||||||
autoFormat = true;
|
|
||||||
device = "/dev/vdb"; # this name is chosen by QEMU, not here
|
|
||||||
fsType = "ext4";
|
|
||||||
noCheck = true;
|
|
||||||
};
|
|
||||||
virtualisation.fileSystems."/volumes/vdc" = {
|
|
||||||
autoFormat = true;
|
|
||||||
device = "/dev/vdc"; # this name is chosen by QEMU, not here
|
|
||||||
fsType = "ext4";
|
|
||||||
noCheck = true;
|
|
||||||
};
|
|
||||||
boot.consoleLogLevel = lib.mkForce 3;
|
|
||||||
documentation.enable = false;
|
|
||||||
services.journald.extraConfig = lib.mkForce "";
|
|
||||||
services.redis.servers.sp-api = {
|
|
||||||
enable = true;
|
|
||||||
save = [ ];
|
|
||||||
settings.notify-keyspace-events = "KEA";
|
|
||||||
};
|
|
||||||
environment.systemPackages = with pkgs; [
|
|
||||||
python-env
|
|
||||||
# TODO: these can be passed via wrapper script around app
|
|
||||||
rclone
|
|
||||||
restic
|
|
||||||
];
|
|
||||||
environment.variables.TEST_MODE = "true";
|
|
||||||
systemd.tmpfiles.settings.src.${vmtest-src-dir}.L.argument =
|
|
||||||
self.outPath;
|
|
||||||
};
|
|
||||||
testScript = ''
|
|
||||||
start_all()
|
|
||||||
machine.succeed("cd ${vmtest-src-dir} && coverage run --data-file=/tmp/.coverage -m pytest -p no:cacheprovider -v >&2")
|
|
||||||
machine.succeed("coverage xml --rcfile=${vmtest-src-dir}/.coveragerc --data-file=/tmp/.coverage >&2")
|
|
||||||
machine.copy_from_vm("coverage.xml", ".")
|
|
||||||
machine.succeed("coverage report >&2")
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
nixConfig.bash-prompt = ''\n\[\e[1;32m\][\[\e[0m\]\[\e[1;34m\]SP devshell\[\e[0m\]\[\e[1;32m\]:\w]\$\[\[\e[0m\] '';
|
|
||||||
}
|
|
|
@ -0,0 +1,81 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
from flask import Flask, jsonify, request, json
|
||||||
|
from flask_restful import Resource, Api, reqparse
|
||||||
|
import base64
|
||||||
|
import pandas as pd
|
||||||
|
import ast
|
||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
app = Flask(__name__)
|
||||||
|
api = Api(app)
|
||||||
|
@app.route("/systemVersion", methods=["GET"])
|
||||||
|
def uname():
|
||||||
|
uname = subprocess.check_output(["uname", "-arm"])
|
||||||
|
return jsonify(uname)
|
||||||
|
@app.route("/getDKIM", methods=["GET"])
|
||||||
|
def getDkimKey():
|
||||||
|
with open("/var/domain") as domainFile:
|
||||||
|
domain = domainFile.readline()
|
||||||
|
domain = domain.rstrip("\n")
|
||||||
|
catProcess = subprocess.Popen(["cat", "/var/dkim/" + domain + ".selector.txt"], stdout=subprocess.PIPE)
|
||||||
|
dkim = catProcess.communicate()[0]
|
||||||
|
dkim = base64.b64encode(dkim)
|
||||||
|
dkim = str(dkim, 'utf-8')
|
||||||
|
print(dkim)
|
||||||
|
response = app.response_class(
|
||||||
|
response=json.dumps(dkim),
|
||||||
|
status=200,
|
||||||
|
mimetype='application/json'
|
||||||
|
)
|
||||||
|
return response
|
||||||
|
@app.route("/pythonVersion", methods=["GET"])
|
||||||
|
def getPythonVersion():
|
||||||
|
pythonVersion = subprocess.check_output(["python","--version"])
|
||||||
|
return jsonify(pythonVersion)
|
||||||
|
@app.route("/apply", methods=["GET"])
|
||||||
|
def rebuildSystem():
|
||||||
|
rebuildResult = subprocess.Popen(["nixos-rebuild","switch"])
|
||||||
|
rebuildResult.communicate()[0]
|
||||||
|
return jsonify(rebuildResult.returncode)
|
||||||
|
@app.route("/rollback", methods=["GET"])
|
||||||
|
def rollbackSystem():
|
||||||
|
rollbackResult = subprocess.Popen(["nixos-rebuild","switch","--rollback"])
|
||||||
|
rollbackResult.communicate()[0]
|
||||||
|
return jsonify(rollbackResult.returncode)
|
||||||
|
@app.route("/upgrade", methods=["GET"])
|
||||||
|
def upgradeSystem():
|
||||||
|
upgradeResult = subprocess.Popen(["nixos-rebuild","switch","--upgrade"])
|
||||||
|
upgradeResult.communicate()[0]
|
||||||
|
return jsonify(upgradeResult.returncode)
|
||||||
|
@app.route("/createUser", methods=["GET"])
|
||||||
|
def createUser():
|
||||||
|
user = subprocess.Popen(["useradd","-m",request.headers.get("X-User")])
|
||||||
|
user.communicate()[0]
|
||||||
|
return jsonify(user.returncode)
|
||||||
|
@app.route("/deleteUser", methods=["DELETE"])
|
||||||
|
def deleteUser():
|
||||||
|
user = subprocess.Popen(["userdel",request.headers.get("X-User")])
|
||||||
|
user.communicate()[0]
|
||||||
|
return jsonify(user.returncode)
|
||||||
|
@app.route("/serviceStatus", methods=["GET"])
|
||||||
|
def getServiceStatus():
|
||||||
|
imapService = subprocess.Popen(["systemctl", "status", "dovecot2.service"])
|
||||||
|
imapService.communicate()[0]
|
||||||
|
smtpService = subprocess.Popen(["systemctl", "status", "postfix.service"])
|
||||||
|
smtpService.communicate()[0]
|
||||||
|
httpService = subprocess.Popen(["systemctl", "status", "nginx.service"])
|
||||||
|
httpService.communicate()[0]
|
||||||
|
return jsonify(
|
||||||
|
imap=imapService.returncode,
|
||||||
|
smtp=smtpService.returncode,
|
||||||
|
http=httpService.returncode
|
||||||
|
)
|
||||||
|
@app.route("/decryptDisk", methods=["POST"])
|
||||||
|
def requestDiskDecryption():
|
||||||
|
decryptionService = subprocess.Popen(["echo", "-n", request.headers['X-Decryption-Key'], "|", "cryptsetup", "luksOpen", "/dev/sdb", "decryptedVar"], stdout=subprocess.PIPE, shell=False)
|
||||||
|
decryptionService.communicate()[0]
|
||||||
|
return jsonify(
|
||||||
|
status=decryptionService.returncode
|
||||||
|
)
|
||||||
|
# Development entry point: serve on port 5050 with the debugger disabled.
if __name__ == '__main__':
    app.run(port=5050, debug=False)
|
|
@ -1,22 +0,0 @@
|
||||||
@startuml
|
|
||||||
|
|
||||||
left to right direction
|
|
||||||
|
|
||||||
title repositories and flake inputs relations diagram
|
|
||||||
|
|
||||||
cloud nixpkgs as nixpkgs_transit
|
|
||||||
control "<font:monospaced><size:15>nixos-rebuild" as nixos_rebuild
|
|
||||||
component "SelfPrivacy\nAPI app" as selfprivacy_app
|
|
||||||
component "SelfPrivacy\nNixOS configuration" as nixos_configuration
|
|
||||||
|
|
||||||
note top of nixos_configuration : SelfPrivacy\nAPI service module
|
|
||||||
|
|
||||||
nixos_configuration ).. nixpkgs_transit
|
|
||||||
nixpkgs_transit ..> selfprivacy_app
|
|
||||||
selfprivacy_app --> nixos_configuration
|
|
||||||
[nixpkgs] --> nixos_configuration
|
|
||||||
nixos_configuration -> nixos_rebuild
|
|
||||||
|
|
||||||
footer %date("yyyy-MM-dd'T'HH:mmZ")
|
|
||||||
|
|
||||||
@enduml
|
|
166
nixos/module.nix
166
nixos/module.nix
|
@ -1,166 +0,0 @@
|
||||||
# NixOS module for the SelfPrivacy API. The module is parameterized by the
# packaged API derivation and wires up the API server, its task worker, and
# three one-shot systemd units that rebuild/upgrade/rollback the system.
selfprivacy-graphql-api: { config, lib, pkgs, ... }:

let
  cfg = config.services.selfprivacy-api;
  # Flake output attribute used by every nixos-rebuild invocation below.
  config-id = "default";
  nixos-rebuild = "${config.system.build.nixos-rebuild}/bin/nixos-rebuild";
  nix = "${config.nix.package.out}/bin/nix";
in
{
  options.services.selfprivacy-api = {
    enable = lib.mkOption {
      default = true;
      type = lib.types.bool;
      description = ''
        Enable SelfPrivacy API service
      '';
    };
  };
  config = lib.mkIf cfg.enable {
    # Dedicated system user; opendkim group grants read access to DKIM keys.
    users.users."selfprivacy-api" = {
      isNormalUser = false;
      isSystemUser = true;
      extraGroups = [ "opendkim" ];
      group = "selfprivacy-api";
    };
    users.groups."selfprivacy-api".members = [ "selfprivacy-api" ];

    systemd.services.selfprivacy-api = {
      description = "API Server used to control system from the mobile application";
      environment = config.nix.envVars // {
        HOME = "/root";
        PYTHONUNBUFFERED = "1";
      } // config.networking.proxy.envVars;
      path = [
        "/var/"
        "/var/dkim/"
        pkgs.coreutils
        pkgs.gnutar
        pkgs.xz.bin
        pkgs.gzip
        pkgs.gitMinimal
        config.nix.package.out
        pkgs.restic
        pkgs.mkpasswd
        pkgs.util-linux
        pkgs.e2fsprogs
        pkgs.iproute2
      ];
      after = [ "network-online.target" ];
      wantedBy = [ "network-online.target" ];
      serviceConfig = {
        # NOTE(review): runs as root despite the dedicated user above —
        # presumably required for system administration calls; confirm.
        User = "root";
        ExecStart = "${selfprivacy-graphql-api}/bin/app.py";
        Restart = "always";
        RestartSec = "5";
      };
    };
    systemd.services.selfprivacy-api-worker = {
      description = "Task worker for SelfPrivacy API";
      environment = config.nix.envVars // {
        HOME = "/root";
        PYTHONUNBUFFERED = "1";
        # The huey consumer imports the API package by module path.
        PYTHONPATH =
          pkgs.python310Packages.makePythonPath [ selfprivacy-graphql-api ];
      } // config.networking.proxy.envVars;
      path = [
        "/var/"
        "/var/dkim/"
        pkgs.coreutils
        pkgs.gnutar
        pkgs.xz.bin
        pkgs.gzip
        pkgs.gitMinimal
        config.nix.package.out
        pkgs.restic
        pkgs.mkpasswd
        pkgs.util-linux
        pkgs.e2fsprogs
        pkgs.iproute2
      ];
      after = [ "network-online.target" ];
      wantedBy = [ "network-online.target" ];
      serviceConfig = {
        User = "root";
        ExecStart = "${pkgs.python310Packages.huey}/bin/huey_consumer.py selfprivacy_api.task_registry.huey";
        Restart = "always";
        RestartSec = "5";
      };
    };
    # One shot systemd service to rebuild NixOS using nixos-rebuild
    systemd.services.sp-nixos-rebuild = {
      description = "nixos-rebuild switch";
      environment = config.nix.envVars // {
        HOME = "/root";
      } // config.networking.proxy.envVars;
      # TODO figure out how to get dependencies list reliably
      path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
      # TODO set proper timeout for reboot instead of service restart
      serviceConfig = {
        User = "root";
        WorkingDirectory = "/etc/nixos";
        # sync top-level flake with sp-modules sub-flake
        # (https://github.com/NixOS/nix/issues/9339)
        ExecStartPre = ''
          ${nix} flake lock --override-input sp-modules path:./sp-modules
        '';
        ExecStart = ''
          ${nixos-rebuild} switch --flake .#${config-id}
        '';
        # Never kill an in-flight rebuild: an interrupted switch can leave
        # the system in a partially activated state.
        KillMode = "none";
        SendSIGKILL = "no";
      };
      restartIfChanged = false;
      unitConfig.X-StopOnRemoval = false;
    };
    # One shot systemd service to upgrade NixOS using nixos-rebuild
    systemd.services.sp-nixos-upgrade = {
      # protection against simultaneous runs
      after = [ "sp-nixos-rebuild.service" ];
      description = "Upgrade NixOS and SP modules to latest versions";
      environment = config.nix.envVars // {
        HOME = "/root";
      } // config.networking.proxy.envVars;
      # TODO figure out how to get dependencies list reliably
      path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
      serviceConfig = {
        User = "root";
        WorkingDirectory = "/etc/nixos";
        # TODO get URL from systemd template parameter?
        ExecStartPre = ''
          ${nix} flake update \
          --override-input selfprivacy-nixos-config git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=flakes
        '';
        ExecStart = ''
          ${nixos-rebuild} switch --flake .#${config-id}
        '';
        KillMode = "none";
        SendSIGKILL = "no";
      };
      restartIfChanged = false;
      unitConfig.X-StopOnRemoval = false;
    };
    # One shot systemd service to rollback NixOS using nixos-rebuild
    systemd.services.sp-nixos-rollback = {
      # protection against simultaneous runs
      after = [ "sp-nixos-rebuild.service" "sp-nixos-upgrade.service" ];
      description = "Rollback NixOS using nixos-rebuild";
      environment = config.nix.envVars // {
        HOME = "/root";
      } // config.networking.proxy.envVars;
      # TODO figure out how to get dependencies list reliably
      path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
      serviceConfig = {
        User = "root";
        WorkingDirectory = "/etc/nixos";
        ExecStart = ''
          ${nixos-rebuild} switch --rollback --flake .#${config-id}
        '';
        KillMode = "none";
        SendSIGKILL = "no";
      };
      restartIfChanged = false;
      unitConfig.X-StopOnRemoval = false;
    };
  };
}
|
|
|
@ -1,3 +0,0 @@
|
||||||
[build-system]
|
|
||||||
requires = ["setuptools", "wheel", "portalocker"]
|
|
||||||
build-backend = "setuptools.build_meta"
|
|
|
@ -0,0 +1,6 @@
|
||||||
|
flask
flask_restful
pandas
# NOTE: "ast", "subprocess" and "os" were listed here previously, but they
# are Python standard-library modules, not PyPI packages — `pip install`
# on such names fails (or, worse, installs an unrelated squatted package).
# ast
# subprocess
# os
|
|
@ -1,181 +0,0 @@
|
||||||
"""
|
|
||||||
App tokens actions.
|
|
||||||
The only actions on tokens that are accessible from APIs
|
|
||||||
"""
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
from typing import Optional
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from mnemonic import Mnemonic
|
|
||||||
|
|
||||||
from selfprivacy_api.utils.timeutils import ensure_tz_aware, ensure_tz_aware_strict
|
|
||||||
from selfprivacy_api.repositories.tokens.redis_tokens_repository import (
|
|
||||||
RedisTokensRepository,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.repositories.tokens.exceptions import (
|
|
||||||
TokenNotFound,
|
|
||||||
RecoveryKeyNotFound,
|
|
||||||
InvalidMnemonic,
|
|
||||||
NewDeviceKeyNotFound,
|
|
||||||
)
|
|
||||||
|
|
||||||
TOKEN_REPO = RedisTokensRepository()
|
|
||||||
|
|
||||||
|
|
||||||
class TokenInfoWithIsCaller(BaseModel):
    """Token info"""

    # Device name the token was issued for.
    name: str
    # Token creation timestamp.
    date: datetime
    # True when this token belongs to the device making the current request.
    is_caller: bool
|
|
||||||
|
|
||||||
|
|
||||||
def _naive(date_time: datetime) -> datetime:
|
|
||||||
if date_time is None:
|
|
||||||
return None
|
|
||||||
if date_time.tzinfo is not None:
|
|
||||||
date_time.astimezone(timezone.utc)
|
|
||||||
return date_time.replace(tzinfo=None)
|
|
||||||
|
|
||||||
|
|
||||||
def get_api_tokens_with_caller_flag(caller_token: str) -> list[TokenInfoWithIsCaller]:
    """List all tokens, marking the one that belongs to the caller."""
    caller_name = TOKEN_REPO.get_token_by_token_string(caller_token).device_name
    infos = []
    for stored in TOKEN_REPO.get_tokens():
        infos.append(
            TokenInfoWithIsCaller(
                name=stored.device_name,
                date=stored.created_at,
                is_caller=(stored.device_name == caller_name),
            )
        )
    return infos
|
|
||||||
|
|
||||||
|
|
||||||
def is_token_valid(token) -> bool:
    """Check if token is valid"""
    verdict = TOKEN_REPO.is_token_valid(token)
    return verdict
|
|
||||||
|
|
||||||
|
|
||||||
# Raised when a token lookup fails (see delete_api_token / refresh_api_token).
class NotFoundException(Exception):
    """Not found exception"""
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by delete_api_token when asked to delete the caller's own token.
class CannotDeleteCallerException(Exception):
    """Cannot delete caller exception"""
|
|
||||||
|
|
||||||
|
|
||||||
def delete_api_token(caller_token: str, token_name: str) -> None:
    """Delete the token named `token_name`.

    Raises CannotDeleteCallerException when the name belongs to the caller's
    own token, NotFoundException when no such token exists.
    """
    if TOKEN_REPO.is_token_name_pair_valid(token_name, caller_token):
        raise CannotDeleteCallerException("Cannot delete caller's token")
    if not TOKEN_REPO.is_token_name_exists(token_name):
        raise NotFoundException("Token not found")
    doomed = TOKEN_REPO.get_token_by_name(token_name)
    TOKEN_REPO.delete_token(doomed)
|
|
||||||
|
|
||||||
|
|
||||||
def refresh_api_token(caller_token: str) -> str:
    """Replace the caller's token with a fresh one and return its string."""
    try:
        current = TOKEN_REPO.get_token_by_token_string(caller_token)
        refreshed = TOKEN_REPO.refresh_token(current)
    except TokenNotFound:
        raise NotFoundException("Token not found")
    return refreshed.token
|
|
||||||
|
|
||||||
|
|
||||||
class RecoveryTokenStatus(BaseModel):
    """Recovery token status"""

    # Whether a recovery key exists at all.
    exists: bool
    # Whether the existing key is currently usable.
    valid: bool
    # Creation time; None when no key exists.
    date: Optional[datetime] = None
    # Expiry time; None presumably means no expiry — confirm with repository.
    expiration: Optional[datetime] = None
    # Remaining uses; None presumably means unlimited — confirm with repository.
    uses_left: Optional[int] = None
|
|
||||||
|
|
||||||
|
|
||||||
def get_api_recovery_token_status() -> RecoveryTokenStatus:
    """Get the recovery token status, timezone-aware"""
    token = TOKEN_REPO.get_recovery_key()
    if token is None:
        return RecoveryTokenStatus(exists=False, valid=False)

    is_valid = TOKEN_REPO.is_recovery_key_valid()

    # Keys written by older versions may carry naive datetimes; normalize.
    expiry = token.expires_at
    if expiry is not None:
        expiry = ensure_tz_aware_strict(expiry)

    return RecoveryTokenStatus(
        exists=True,
        valid=is_valid,
        date=ensure_tz_aware_strict(token.created_at),
        expiration=expiry,
        uses_left=token.uses_left,
    )
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by get_new_api_recovery_key for expiration dates in the past.
class InvalidExpirationDate(Exception):
    """Invalid expiration date exception"""
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by get_new_api_recovery_key for non-positive uses_left values.
class InvalidUsesLeft(Exception):
    """Invalid uses left exception"""
|
|
||||||
|
|
||||||
|
|
||||||
def get_new_api_recovery_key(
    expiration_date: Optional[datetime] = None, uses_left: Optional[int] = None
) -> str:
    """Create a recovery key and return it as an English mnemonic phrase.

    Raises InvalidExpirationDate for past dates and InvalidUsesLeft for
    non-positive use counts.
    """
    if expiration_date is not None:
        expiration_date = ensure_tz_aware(expiration_date)
        if expiration_date < datetime.now(timezone.utc):
            raise InvalidExpirationDate("Expiration date is in the past")
    if uses_left is not None and uses_left <= 0:
        raise InvalidUsesLeft("Uses must be greater than 0")

    key = TOKEN_REPO.create_recovery_key(expiration_date, uses_left)
    return Mnemonic(language="english").to_mnemonic(bytes.fromhex(key.key))
|
|
||||||
|
|
||||||
|
|
||||||
def use_mnemonic_recovery_token(mnemonic_phrase, name):
    """Redeem a recovery mnemonic for a new device token.

    mnemonic_phrase is the space-separated mnemonic word list. Returns the
    new token string, or None when no recovery key exists or the phrase
    does not match it (uses_left bookkeeping happens in the repository).
    """
    try:
        issued = TOKEN_REPO.use_mnemonic_recovery_key(mnemonic_phrase, name)
    except (RecoveryKeyNotFound, InvalidMnemonic):
        return None
    return issued.token
|
|
||||||
|
|
||||||
|
|
||||||
def delete_new_device_auth_token() -> None:
    """Invalidate the pending new-device auth key, if any."""
    TOKEN_REPO.delete_new_device_key()
|
|
||||||
|
|
||||||
|
|
||||||
def get_new_device_auth_token() -> str:
    """Generate and store a new device auth token which is valid for 10 minutes
    and return a mnemonic phrase representation
    """
    fresh_key = TOKEN_REPO.get_new_device_key()
    encoder = Mnemonic(language="english")
    return encoder.to_mnemonic(bytes.fromhex(fresh_key.key))
|
|
||||||
|
|
||||||
|
|
||||||
def use_new_device_auth_token(mnemonic_phrase, name) -> Optional[str]:
    """Redeem a new-device mnemonic for a device token.

    Returns the token string, or None when the key is unknown or the
    mnemonic is invalid. New device auth token must be deleted.
    """
    try:
        issued = TOKEN_REPO.use_mnemonic_new_device_key(mnemonic_phrase, name)
    except (NewDeviceKeyNotFound, InvalidMnemonic):
        return None
    return issued.token
|
|
|
@ -1,34 +0,0 @@
|
||||||
from selfprivacy_api.utils.block_devices import BlockDevices
|
|
||||||
from selfprivacy_api.jobs import Jobs, Job
|
|
||||||
|
|
||||||
from selfprivacy_api.services import get_service_by_id
|
|
||||||
from selfprivacy_api.services.tasks import move_service as move_service_task
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceNotFoundError(Exception):
    """Raised by move_service when no service matches the given id."""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeNotFoundError(Exception):
    """Raised by move_service when no block device matches the given name."""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
def move_service(service_id: str, volume_name: str) -> Job:
    """Kick off moving a service's data to another volume; return the Job."""
    service = get_service_by_id(service_id)
    if service is None:
        raise ServiceNotFoundError(f"No such service:{service_id}")

    volume = BlockDevices().get_block_device(volume_name)
    if volume is None:
        raise VolumeNotFoundError(f"No such volume:{volume_name}")

    # Raises if the service cannot be moved onto this volume.
    service.assert_can_move(volume)

    display_name = service.get_display_name()
    job = Jobs.add(
        type_id=f"services.{service.get_id()}.move",
        name=f"Move {display_name}",
        description=f"Moving {display_name} data to {volume.name}",
    )
    move_service_task(service, volume, job)
    return job
|
|
|
@ -1,136 +0,0 @@
|
||||||
"""Actions to manage the SSH."""
|
|
||||||
from typing import Optional
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from selfprivacy_api.actions.users import (
|
|
||||||
UserNotFound,
|
|
||||||
ensure_ssh_and_users_fields_exist,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.utils import WriteUserData, ReadUserData, validate_ssh_public_key
|
|
||||||
|
|
||||||
|
|
||||||
def enable_ssh():
    """Switch the SSH service on in persisted user data."""
    with WriteUserData() as data:
        data.setdefault("ssh", {})["enable"] = True
|
|
||||||
|
|
||||||
|
|
||||||
class UserdataSshSettings(BaseModel):
    """Settings for the SSH."""

    # Whether the SSH service is enabled.
    enable: bool = True
    # Whether password logins are accepted.
    passwordAuthentication: bool = True
    # Public keys authorized for the root account.
    rootKeys: list[str] = []
|
|
||||||
|
|
||||||
|
|
||||||
def get_ssh_settings() -> UserdataSshSettings:
    """Read SSH settings from user data, backfilling missing fields.

    Defaults applied for absent fields: enable=True,
    passwordAuthentication=False, rootKeys=[].
    """
    with ReadUserData() as data:
        if "ssh" not in data:
            return UserdataSshSettings()
        ssh = data["ssh"]
        ssh.setdefault("enable", True)
        ssh.setdefault("passwordAuthentication", False)
        ssh.setdefault("rootKeys", [])
        return UserdataSshSettings(**ssh)
|
|
||||||
|
|
||||||
|
|
||||||
def set_ssh_settings(
    enable: Optional[bool] = None, password_authentication: Optional[bool] = None
) -> None:
    """Update SSH flags in user data; a None argument leaves its field alone."""
    with WriteUserData() as data:
        ssh = data.setdefault("ssh", {})
        if enable is not None:
            ssh["enable"] = enable
        if password_authentication is not None:
            ssh["passwordAuthentication"] = password_authentication
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by create_ssh_key when the key is already present for the user.
class KeyAlreadyExists(Exception):
    """Key already exists"""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by create_ssh_key when validate_ssh_public_key rejects the input.
class InvalidPublicKey(Exception):
    """Invalid public key"""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
def create_ssh_key(username: str, ssh_key: str):
    """Create a new ssh key

    The key is attached to the primary user, to root, or to a secondary
    user, in that precedence order. Raises InvalidPublicKey for malformed
    keys, KeyAlreadyExists for duplicates, UserNotFound for unknown users.
    """
    if not validate_ssh_public_key(ssh_key):
        raise InvalidPublicKey()

    def append_unique(key_list):
        # Shared duplicate-checked append used for every key store.
        if ssh_key in key_list:
            raise KeyAlreadyExists()
        key_list.append(ssh_key)

    with WriteUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == data["username"]:
            append_unique(data["sshKeys"])
            return

        if username == "root":
            append_unique(data["ssh"]["rootKeys"])
            return

        for user in data["users"]:
            if user["username"] == username:
                user.setdefault("sshKeys", [])
                append_unique(user["sshKeys"])
                return

        # Raised inside the context manager, matching the original flow.
        raise UserNotFound()
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by remove_ssh_key when the key is absent from the user's key list.
class KeyNotFound(Exception):
    """Key not found"""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
def remove_ssh_key(username: str, ssh_key: str):
    """Delete a ssh key

    Checks root, then the primary user, then secondary users, mirroring
    the original order. Raises KeyNotFound when the key is absent and
    UserNotFound for unknown usernames.
    """

    def discard(key_list):
        # Remove the key or raise, shared by every branch below.
        if ssh_key not in key_list:
            raise KeyNotFound()
        key_list.remove(ssh_key)

    with WriteUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == "root":
            discard(data["ssh"]["rootKeys"])
            return

        if username == data["username"]:
            discard(data["sshKeys"])
            return

        for user in data["users"]:
            if user["username"] == username:
                user.setdefault("sshKeys", [])
                discard(user["sshKeys"])
                return

        raise UserNotFound()
|
|
|
@ -1,173 +0,0 @@
|
||||||
"""Actions to manage the system."""
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
import pytz
|
|
||||||
from typing import Optional, List
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from selfprivacy_api.jobs import Job, JobStatus, Jobs
|
|
||||||
from selfprivacy_api.jobs.upgrade_system import rebuild_system_task
|
|
||||||
|
|
||||||
from selfprivacy_api.utils import WriteUserData, ReadUserData
|
|
||||||
|
|
||||||
|
|
||||||
def get_timezone() -> str:
    """Get the timezone of the server"""
    with ReadUserData() as user_data:
        # Fall back to UTC when no timezone has been configured.
        return user_data.get("timezone", "Etc/UTC")
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by change_timezone for names absent from pytz.all_timezones.
class InvalidTimezone(Exception):
    """Invalid timezone"""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
def change_timezone(timezone: str) -> None:
    """Change the timezone of the server"""
    # Validate against the canonical tz database before persisting.
    if timezone in pytz.all_timezones:
        with WriteUserData() as user_data:
            user_data["timezone"] = timezone
    else:
        raise InvalidTimezone(f"Invalid timezone: {timezone}")
|
|
||||||
|
|
||||||
|
|
||||||
class UserDataAutoUpgradeSettings(BaseModel):
    """Settings for auto-upgrading user data"""

    # Whether automatic upgrades are enabled.
    enable: bool = True
    # Whether an upgrade may reboot the machine.
    allowReboot: bool = False
|
|
||||||
|
|
||||||
|
|
||||||
def get_auto_upgrade_settings() -> UserDataAutoUpgradeSettings:
    """Get the auto-upgrade settings"""
    with ReadUserData() as user_data:
        if "autoUpgrade" not in user_data:
            # Nothing persisted yet: all defaults.
            return UserDataAutoUpgradeSettings()
        return UserDataAutoUpgradeSettings(**user_data["autoUpgrade"])
|
|
||||||
|
|
||||||
|
|
||||||
def set_auto_upgrade_settings(
    enalbe: Optional[bool] = None,
    allowReboot: Optional[bool] = None,
    *,
    enable: Optional[bool] = None,
) -> None:
    """Set the auto-upgrade settings; None leaves a field untouched.

    `enalbe` is a historical misspelling kept for backward compatibility
    with existing callers; the keyword-only `enable` is the preferred
    spelling and takes precedence when both are supplied.
    """
    if enable is None:
        enable = enalbe
    with WriteUserData() as user_data:
        settings = user_data.setdefault("autoUpgrade", {})
        if enable is not None:
            settings["enable"] = enable
        if allowReboot is not None:
            settings["allowReboot"] = allowReboot
|
|
||||||
|
|
||||||
|
|
||||||
# Raised by run_blocking when the child process exits with a non-zero code.
class ShellException(Exception):
    """Something went wrong when calling another process"""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
def run_blocking(cmd: List[str], new_session: bool = False) -> str:
    """Run `cmd` to completion and return its decoded stdout.

    `new_session` detaches the child into its own session. Raises
    ShellException (with the combined output) on a non-zero exit code.
    """
    completed = subprocess.run(
        cmd,
        shell=False,
        start_new_session=new_session,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=False,
    )
    stdout = completed.stdout.decode("utf-8")
    stderr = completed.stderr.decode("utf-8") if completed.stderr is not None else ""
    if completed.returncode != 0:
        output = stdout + "\n" + stderr
        raise ShellException(
            f"Shell command failed, command array: {cmd}, output: {output}"
        )
    return stdout
|
|
||||||
|
|
||||||
|
|
||||||
def rebuild_system() -> Job:
    """Rebuild the system.

    Creates a tracked job and hands it to the rebuild task; returns the
    job so callers can monitor progress.
    """
    rebuild_job = Jobs.add(
        type_id="system.nixos.rebuild",
        name="Rebuild system",
        description="Applying the new system configuration by building the new NixOS generation.",
        status=JobStatus.CREATED,
    )
    rebuild_system_task(rebuild_job)
    return rebuild_job
def rollback_system() -> int:
    """Rollback the system.

    Triggers the dedicated systemd rollback unit and always returns 0
    (run_blocking raises on failure).
    """
    rollback_cmd = ["systemctl", "start", "sp-nixos-rollback.service"]
    run_blocking(rollback_cmd, new_session=True)
    return 0
def upgrade_system() -> Job:
    """Upgrade the system.

    Creates a tracked job and schedules the rebuild task with the
    upgrade flag set; returns the job for progress monitoring.
    """
    upgrade_job = Jobs.add(
        type_id="system.nixos.upgrade",
        name="Upgrade system",
        description="Upgrading the system to the latest version.",
        status=JobStatus.CREATED,
    )
    # An upgrade is implemented as a rebuild with upgrade=True.
    rebuild_system_task(upgrade_job, upgrade=True)
    return upgrade_job
def reboot_system() -> None:
    """Reboot the system.

    NOTE(review): new_session=True detaches the child into its own
    session — presumably so the reboot is not killed together with this
    process; confirm.
    """
    run_blocking(["reboot"], new_session=True)
def get_system_version() -> str:
    """Get system version (the trimmed output of ``uname -a``)."""
    raw_version = subprocess.check_output(["uname", "-a"])
    return raw_version.decode("utf-8").strip()
def get_python_version() -> str:
    """Get Python version (the trimmed output of ``python -V``)."""
    raw_version = subprocess.check_output(["python", "-V"])
    return raw_version.decode("utf-8").strip()
class SystemActionResult(BaseModel):
    """System action result"""

    # 0 on success, otherwise the failing process's return code.
    status: int
    # Human-readable summary of the outcome.
    message: str
    # Raw output of the underlying command.
    data: str
def pull_repository_changes() -> SystemActionResult:
    """Pull repository changes.

    Runs ``git pull`` inside ``/etc/nixos`` and reports the combined
    output. Uses Popen's ``cwd=`` instead of ``os.chdir`` so the
    process-wide working directory is never mutated (the old chdir was
    not restored on exception and raced other threads).

    Returns:
        SystemActionResult with status 0 on success, or git's return
        code and a failure message otherwise.
    """
    git_pull_command = ["git", "pull"]

    git_pull_process_descriptor = subprocess.Popen(
        git_pull_command,
        cwd="/etc/nixos",
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=False,
    )

    data = git_pull_process_descriptor.communicate()[0].decode("utf-8")

    if git_pull_process_descriptor.returncode == 0:
        return SystemActionResult(
            status=0,
            message="Pulled repository changes",
            data=data,
        )
    return SystemActionResult(
        status=git_pull_process_descriptor.returncode,
        message="Failed to pull repository changes",
        data=data,
    )
@ -1,229 +0,0 @@
|
||||||
"""Actions to manage the users."""
|
|
||||||
import re
|
|
||||||
from typing import Optional
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from enum import Enum
|
|
||||||
from selfprivacy_api.utils import (
|
|
||||||
ReadUserData,
|
|
||||||
WriteUserData,
|
|
||||||
hash_password,
|
|
||||||
is_username_forbidden,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class UserDataUserOrigin(Enum):
    """Origin of the user in the user data"""

    # Regular user stored in the "users" list of userdata.
    NORMAL = "NORMAL"
    # The primary (admin) user defined at the top level of userdata.
    PRIMARY = "PRIMARY"
    # The system root user (keys under ssh.rootKeys).
    ROOT = "ROOT"
class UserDataUser(BaseModel):
    """The user model from the userdata file"""

    # Login name of the user.
    username: str
    # The user's SSH public keys.
    ssh_keys: list[str]
    # Whether this is a root, primary (admin), or normal user.
    origin: UserDataUserOrigin
def ensure_ssh_and_users_fields_exist(data):
    """Normalize userdata in place.

    Guarantees that ``data["ssh"]["rootKeys"]``, ``data["sshKeys"]`` and
    ``data["users"]`` all exist, creating empty containers where needed.
    """
    ssh_section = data.setdefault("ssh", {})
    # Covers both a missing key and an explicit None value.
    if ssh_section.get("rootKeys") is None:
        ssh_section["rootKeys"] = []

    data.setdefault("sshKeys", [])
    data.setdefault("users", [])
def get_users(
    exclude_primary: bool = False,
    exclude_root: bool = False,
) -> list[UserDataUser]:
    """Get the list of users.

    Collects normal users from the "users" list, then optionally appends
    the primary (admin) user and the root user.
    """
    found: list[UserDataUser] = []
    with ReadUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)
        found = [
            UserDataUser(
                username=entry["username"],
                ssh_keys=entry.get("sshKeys", []),
                origin=UserDataUserOrigin.NORMAL,
            )
            for entry in user_data["users"]
        ]
        if not exclude_primary and "username" in user_data:
            found.append(
                UserDataUser(
                    username=user_data["username"],
                    ssh_keys=user_data["sshKeys"],
                    origin=UserDataUserOrigin.PRIMARY,
                )
            )
        if not exclude_root:
            found.append(
                UserDataUser(
                    username="root",
                    ssh_keys=user_data["ssh"]["rootKeys"],
                    origin=UserDataUserOrigin.ROOT,
                )
            )
    return found
class UsernameForbidden(Exception):
    """Attempted to create a user with a forbidden username"""
class UserAlreadyExists(Exception):
    """Attempted to create a user that already exists"""
class UsernameNotAlphanumeric(Exception):
    """Attempted to create a user with a non-alphanumeric username"""
class UsernameTooLong(Exception):
    """Attempted to create a user with a too long username. Username must be less than 32 characters"""
class PasswordIsEmpty(Exception):
    """Attempted to create a user with an empty password"""
class InvalidConfiguration(Exception):
    """The userdata is broken"""
def create_user(username: str, password: str):
    """Create a normal user with the given password.

    Raises:
        PasswordIsEmpty: the password is an empty string.
        UsernameForbidden: the name is on the forbidden list.
        UsernameNotAlphanumeric: the name fails the [a-z_][a-z0-9_]+ rule.
        UsernameTooLong: the name is 32 characters or more.
        InvalidConfiguration: userdata has no primary "username" field.
        UserAlreadyExists: the name clashes with the primary user or an
            existing normal user.
    """
    if not password:
        raise PasswordIsEmpty("Password is empty")

    if is_username_forbidden(username):
        raise UsernameForbidden("Username is forbidden")

    if re.match(r"^[a-z_][a-z0-9_]+$", username) is None:
        raise UsernameNotAlphanumeric(
            "Username must be alphanumeric and start with a letter"
        )

    if len(username) >= 32:
        raise UsernameTooLong("Username must be less than 32 characters")

    with ReadUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)
        if "username" not in user_data:
            raise InvalidConfiguration(
                "Broken config: Admin name is not defined. Consider recovery or add it manually"
            )
        taken_names = [entry["username"] for entry in user_data["users"]]
        if username == user_data["username"] or username in taken_names:
            raise UserAlreadyExists("User already exists")

    hashed_password = hash_password(password)

    with WriteUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)

        user_data["users"].append(
            {"username": username, "sshKeys": [], "hashedPassword": hashed_password}
        )
class UserNotFound(Exception):
    """Attempted to get a user that does not exist"""
class UserIsProtected(Exception):
    """Attempted to delete a user that is protected"""
def delete_user(username: str):
    """Delete a normal user from userdata.

    Raises:
        UserIsProtected: when targeting the primary or root user.
        UserNotFound: when no normal user with that name exists.
    """
    with WriteUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)
        if username in (user_data["username"], "root"):
            raise UserIsProtected("Cannot delete main or root user")

        for entry in user_data["users"]:
            if entry["username"] == username:
                user_data["users"].remove(entry)
                break
        else:
            # The loop finished without a match.
            raise UserNotFound("User did not exist")
def update_user(username: str, password: str):
    """Set a new password for an existing user.

    The primary user's hash is stored in "hashedMasterPassword";
    normal users keep theirs in their own entry.

    Raises:
        PasswordIsEmpty: the password is an empty string.
        UserNotFound: the user is neither the primary user nor listed
            in "users".
    """
    if not password:
        raise PasswordIsEmpty("Password is empty")

    hashed_password = hash_password(password)

    with WriteUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == data["username"]:
            data["hashedMasterPassword"] = hashed_password
        else:
            for entry in data["users"]:
                if entry["username"] == username:
                    entry["hashedPassword"] = hashed_password
                    break
            else:
                raise UserNotFound("User does not exist")
def get_user_by_username(username: str) -> Optional[UserDataUser]:
    """Look up a single user (root, primary, or normal) by name.

    Returns None when no user with that name exists.
    """
    with ReadUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == "root":
            return UserDataUser(
                origin=UserDataUserOrigin.ROOT,
                username="root",
                ssh_keys=data["ssh"]["rootKeys"],
            )

        if username == data["username"]:
            return UserDataUser(
                origin=UserDataUserOrigin.PRIMARY,
                username=username,
                ssh_keys=data["sshKeys"],
            )

        for entry in data["users"]:
            if entry["username"] != username:
                continue
            return UserDataUser(
                origin=UserDataUserOrigin.NORMAL,
                username=username,
                # Older entries may lack the key list entirely.
                ssh_keys=entry.get("sshKeys", []),
            )

    return None
@ -1,46 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""SelfPrivacy server management API"""
|
|
||||||
from fastapi import FastAPI
|
|
||||||
from fastapi.middleware.cors import CORSMiddleware
|
|
||||||
from strawberry.fastapi import GraphQLRouter
|
|
||||||
|
|
||||||
import uvicorn
|
|
||||||
|
|
||||||
from selfprivacy_api.dependencies import get_api_version
|
|
||||||
from selfprivacy_api.graphql.schema import schema
|
|
||||||
from selfprivacy_api.migrations import run_migrations
|
|
||||||
|
|
||||||
|
|
||||||
# Application object; middleware and routers are attached to it below.
app = FastAPI()

# GraphQL endpoint built from the project-wide strawberry schema.
graphql_app = GraphQLRouter(
    schema,
)

# NOTE(review): allow_origins=["*"] together with allow_credentials=True
# is maximally permissive CORS — confirm this is intended.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


app.include_router(graphql_app, prefix="/graphql")


@app.get("/api/version")
async def get_version():
    """Get the version of the server"""
    return {"version": get_api_version()}


@app.on_event("startup")
async def startup():
    """Run data migrations before the server starts serving requests."""
    run_migrations()


if __name__ == "__main__":
    # Direct-execution entry point for local runs.
    uvicorn.run(
        "selfprivacy_api.app:app", host="127.0.0.1", port=5050, log_level="info"
    )
@ -1,741 +0,0 @@
|
||||||
"""
|
|
||||||
This module contains the controller class for backups.
|
|
||||||
"""
|
|
||||||
from datetime import datetime, timedelta, timezone
|
|
||||||
import time
|
|
||||||
import os
|
|
||||||
from os import statvfs
|
|
||||||
from typing import Callable, List, Optional
|
|
||||||
|
|
||||||
from selfprivacy_api.services import (
|
|
||||||
get_service_by_id,
|
|
||||||
get_all_services,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.services.service import (
|
|
||||||
Service,
|
|
||||||
ServiceStatus,
|
|
||||||
StoppedService,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.jobs import Jobs, JobStatus, Job
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.queries.providers import (
|
|
||||||
BackupProvider as BackupProviderEnum,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import (
|
|
||||||
RestoreStrategy,
|
|
||||||
BackupReason,
|
|
||||||
AutobackupQuotas,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
from selfprivacy_api.models.backup.snapshot import Snapshot
|
|
||||||
|
|
||||||
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
|
|
||||||
from selfprivacy_api.backup.providers import get_provider
|
|
||||||
from selfprivacy_api.backup.storage import Storage
|
|
||||||
from selfprivacy_api.backup.jobs import (
|
|
||||||
get_backup_job,
|
|
||||||
get_backup_fail,
|
|
||||||
add_backup_job,
|
|
||||||
get_restore_job,
|
|
||||||
add_restore_job,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
BACKUP_PROVIDER_ENVS = {
|
|
||||||
"kind": "BACKUP_KIND",
|
|
||||||
"login": "BACKUP_LOGIN",
|
|
||||||
"key": "BACKUP_KEY",
|
|
||||||
"location": "BACKUP_LOCATION",
|
|
||||||
}
|
|
||||||
|
|
||||||
AUTOBACKUP_JOB_EXPIRATION_SECONDS = 60 * 60 # one hour
|
|
||||||
|
|
||||||
|
|
||||||
class NotDeadError(AssertionError):
    """
    This error is raised when we try to back up a service that is not dead yet.
    """

    def __init__(self, service: Service):
        # Only the service id is kept; the message is rendered in __str__.
        self.service_name = service.get_id()
        super().__init__()

    def __str__(self):
        return f"""
        Service {self.service_name} should be either stopped or dead from
        an error before we back up.
        Normally, this error is unreachable because we do try ensure this.
        Apparently, not this time.
        """
class RotationBucket:
    """
    Bucket object used for rotation.
    Has the following mutable fields:
    - the counter, int
    - the lambda function which takes datetime and the int and returns the int
    - the last, int
    """

    def __init__(self, counter: int, last: int, rotation_lambda):
        # How many snapshots this bucket may still keep.
        self.counter: int = counter
        # The last value returned by rotation_lambda for this bucket.
        self.last: int = last
        # Maps (snapshot datetime, index) to a comparable bucket key.
        self.rotation_lambda: Callable[[datetime, int], int] = rotation_lambda

    def __str__(self) -> str:
        return f"Bucket(counter={self.counter}, last={self.last})"
class Backups:
|
|
||||||
"""A stateless controller class for backups"""
|
|
||||||
|
|
||||||
# Providers
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def provider() -> AbstractBackupProvider:
|
|
||||||
"""
|
|
||||||
Returns the current backup storage provider.
|
|
||||||
"""
|
|
||||||
return Backups._lookup_provider()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def set_provider(
|
|
||||||
kind: BackupProviderEnum,
|
|
||||||
login: str,
|
|
||||||
key: str,
|
|
||||||
location: str,
|
|
||||||
repo_id: str = "",
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Sets the new configuration of the backup storage provider.
|
|
||||||
|
|
||||||
In case of `BackupProviderEnum.BACKBLAZE`, the `login` is the key ID,
|
|
||||||
the `key` is the key itself, and the `location` is the bucket name and
|
|
||||||
the `repo_id` is the bucket ID.
|
|
||||||
"""
|
|
||||||
provider: AbstractBackupProvider = Backups._construct_provider(
|
|
||||||
kind,
|
|
||||||
login,
|
|
||||||
key,
|
|
||||||
location,
|
|
||||||
repo_id,
|
|
||||||
)
|
|
||||||
Storage.store_provider(provider)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def reset() -> None:
|
|
||||||
"""
|
|
||||||
Deletes all the data about the backup storage provider.
|
|
||||||
"""
|
|
||||||
Storage.reset()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _lookup_provider() -> AbstractBackupProvider:
|
|
||||||
redis_provider = Backups._load_provider_redis()
|
|
||||||
if redis_provider is not None:
|
|
||||||
return redis_provider
|
|
||||||
|
|
||||||
none_provider = Backups._construct_provider(
|
|
||||||
BackupProviderEnum.NONE, login="", key="", location=""
|
|
||||||
)
|
|
||||||
Storage.store_provider(none_provider)
|
|
||||||
return none_provider
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def set_provider_from_envs():
|
|
||||||
for env in BACKUP_PROVIDER_ENVS.values():
|
|
||||||
if env not in os.environ.keys():
|
|
||||||
raise ValueError(
|
|
||||||
f"Cannot set backup provider from envs, there is no {env} set"
|
|
||||||
)
|
|
||||||
|
|
||||||
kind_str = os.environ[BACKUP_PROVIDER_ENVS["kind"]]
|
|
||||||
kind_enum = BackupProviderEnum[kind_str]
|
|
||||||
provider = Backups._construct_provider(
|
|
||||||
kind=kind_enum,
|
|
||||||
login=os.environ[BACKUP_PROVIDER_ENVS["login"]],
|
|
||||||
key=os.environ[BACKUP_PROVIDER_ENVS["key"]],
|
|
||||||
location=os.environ[BACKUP_PROVIDER_ENVS["location"]],
|
|
||||||
)
|
|
||||||
Storage.store_provider(provider)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _construct_provider(
|
|
||||||
kind: BackupProviderEnum,
|
|
||||||
login: str,
|
|
||||||
key: str,
|
|
||||||
location: str,
|
|
||||||
repo_id: str = "",
|
|
||||||
) -> AbstractBackupProvider:
|
|
||||||
provider_class = get_provider(kind)
|
|
||||||
|
|
||||||
return provider_class(
|
|
||||||
login=login,
|
|
||||||
key=key,
|
|
||||||
location=location,
|
|
||||||
repo_id=repo_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _load_provider_redis() -> Optional[AbstractBackupProvider]:
|
|
||||||
provider_model = Storage.load_provider()
|
|
||||||
if provider_model is None:
|
|
||||||
return None
|
|
||||||
return Backups._construct_provider(
|
|
||||||
BackupProviderEnum[provider_model.kind],
|
|
||||||
provider_model.login,
|
|
||||||
provider_model.key,
|
|
||||||
provider_model.location,
|
|
||||||
provider_model.repo_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Init
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def init_repo() -> None:
|
|
||||||
"""
|
|
||||||
Initializes the backup repository. This is required once per repo.
|
|
||||||
"""
|
|
||||||
Backups.provider().backupper.init()
|
|
||||||
Storage.mark_as_init()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def erase_repo() -> None:
|
|
||||||
"""
|
|
||||||
Completely empties the remote
|
|
||||||
"""
|
|
||||||
Backups.provider().backupper.erase_repo()
|
|
||||||
Storage.mark_as_uninitted()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def is_initted() -> bool:
|
|
||||||
"""
|
|
||||||
Returns whether the backup repository is initialized or not.
|
|
||||||
If it is not initialized, we cannot back up and probably should
|
|
||||||
call `init_repo` first.
|
|
||||||
"""
|
|
||||||
if Storage.has_init_mark():
|
|
||||||
return True
|
|
||||||
|
|
||||||
initted = Backups.provider().backupper.is_initted()
|
|
||||||
if initted:
|
|
||||||
Storage.mark_as_init()
|
|
||||||
return True
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Backup
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def back_up(
|
|
||||||
service: Service, reason: BackupReason = BackupReason.EXPLICIT
|
|
||||||
) -> Snapshot:
|
|
||||||
"""The top-level function to back up a service
|
|
||||||
If it fails for any reason at all, it should both mark job as
|
|
||||||
errored and re-raise an error"""
|
|
||||||
|
|
||||||
job = get_backup_job(service)
|
|
||||||
if job is None:
|
|
||||||
job = add_backup_job(service)
|
|
||||||
Jobs.update(job, status=JobStatus.RUNNING)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if service.can_be_backed_up() is False:
|
|
||||||
raise ValueError("cannot backup a non-backuppable service")
|
|
||||||
folders = service.get_folders()
|
|
||||||
service_name = service.get_id()
|
|
||||||
service.pre_backup()
|
|
||||||
snapshot = Backups.provider().backupper.start_backup(
|
|
||||||
folders,
|
|
||||||
service_name,
|
|
||||||
reason=reason,
|
|
||||||
)
|
|
||||||
|
|
||||||
Backups._on_new_snapshot_created(service_name, snapshot)
|
|
||||||
if reason == BackupReason.AUTO:
|
|
||||||
Backups._prune_auto_snaps(service)
|
|
||||||
service.post_restore()
|
|
||||||
except Exception as error:
|
|
||||||
Jobs.update(job, status=JobStatus.ERROR, error=str(error))
|
|
||||||
raise error
|
|
||||||
|
|
||||||
Jobs.update(job, status=JobStatus.FINISHED)
|
|
||||||
if reason in [BackupReason.AUTO, BackupReason.PRE_RESTORE]:
|
|
||||||
Jobs.set_expiration(job, AUTOBACKUP_JOB_EXPIRATION_SECONDS)
|
|
||||||
return Backups.sync_date_from_cache(snapshot)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def sync_date_from_cache(snapshot: Snapshot) -> Snapshot:
|
|
||||||
"""
|
|
||||||
Our snapshot creation dates are different from those on server by a tiny amount.
|
|
||||||
This is a convenience, maybe it is better to write a special comparison
|
|
||||||
function for snapshots
|
|
||||||
"""
|
|
||||||
return Storage.get_cached_snapshot_by_id(snapshot.id)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _auto_snaps(service):
|
|
||||||
return [
|
|
||||||
snap
|
|
||||||
for snap in Backups.get_snapshots(service)
|
|
||||||
if snap.reason == BackupReason.AUTO
|
|
||||||
]
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _prune_snaps_with_quotas(snapshots: List[Snapshot]) -> List[Snapshot]:
|
|
||||||
# Function broken out for testability
|
|
||||||
# Sorting newest first
|
|
||||||
sorted_snaps = sorted(snapshots, key=lambda s: s.created_at, reverse=True)
|
|
||||||
quotas: AutobackupQuotas = Backups.autobackup_quotas()
|
|
||||||
|
|
||||||
buckets: list[RotationBucket] = [
|
|
||||||
RotationBucket(
|
|
||||||
quotas.last, # type: ignore
|
|
||||||
-1,
|
|
||||||
lambda _, index: index,
|
|
||||||
),
|
|
||||||
RotationBucket(
|
|
||||||
quotas.daily, # type: ignore
|
|
||||||
-1,
|
|
||||||
lambda date, _: date.year * 10000 + date.month * 100 + date.day,
|
|
||||||
),
|
|
||||||
RotationBucket(
|
|
||||||
quotas.weekly, # type: ignore
|
|
||||||
-1,
|
|
||||||
lambda date, _: date.year * 100 + date.isocalendar()[1],
|
|
||||||
),
|
|
||||||
RotationBucket(
|
|
||||||
quotas.monthly, # type: ignore
|
|
||||||
-1,
|
|
||||||
lambda date, _: date.year * 100 + date.month,
|
|
||||||
),
|
|
||||||
RotationBucket(
|
|
||||||
quotas.yearly, # type: ignore
|
|
||||||
-1,
|
|
||||||
lambda date, _: date.year,
|
|
||||||
),
|
|
||||||
]
|
|
||||||
|
|
||||||
new_snaplist: List[Snapshot] = []
|
|
||||||
for i, snap in enumerate(sorted_snaps):
|
|
||||||
keep_snap = False
|
|
||||||
for bucket in buckets:
|
|
||||||
if (bucket.counter > 0) or (bucket.counter == -1):
|
|
||||||
val = bucket.rotation_lambda(snap.created_at, i)
|
|
||||||
if (val != bucket.last) or (i == len(sorted_snaps) - 1):
|
|
||||||
bucket.last = val
|
|
||||||
if bucket.counter > 0:
|
|
||||||
bucket.counter -= 1
|
|
||||||
if not keep_snap:
|
|
||||||
new_snaplist.append(snap)
|
|
||||||
keep_snap = True
|
|
||||||
|
|
||||||
return new_snaplist
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _prune_auto_snaps(service) -> None:
|
|
||||||
# Not very testable by itself, so most testing is going on Backups._prune_snaps_with_quotas
|
|
||||||
# We can still test total limits and, say, daily limits
|
|
||||||
|
|
||||||
auto_snaps = Backups._auto_snaps(service)
|
|
||||||
new_snaplist = Backups._prune_snaps_with_quotas(auto_snaps)
|
|
||||||
|
|
||||||
deletable_snaps = [snap for snap in auto_snaps if snap not in new_snaplist]
|
|
||||||
Backups.forget_snapshots(deletable_snaps)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _standardize_quotas(i: int) -> int:
|
|
||||||
if i <= -1:
|
|
||||||
i = -1
|
|
||||||
return i
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def autobackup_quotas() -> AutobackupQuotas:
|
|
||||||
"""0 means do not keep, -1 means unlimited"""
|
|
||||||
|
|
||||||
return Storage.autobackup_quotas()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def set_autobackup_quotas(quotas: AutobackupQuotas) -> None:
|
|
||||||
"""0 means do not keep, -1 means unlimited"""
|
|
||||||
|
|
||||||
Storage.set_autobackup_quotas(
|
|
||||||
AutobackupQuotas(
|
|
||||||
last=Backups._standardize_quotas(quotas.last), # type: ignore
|
|
||||||
daily=Backups._standardize_quotas(quotas.daily), # type: ignore
|
|
||||||
weekly=Backups._standardize_quotas(quotas.weekly), # type: ignore
|
|
||||||
monthly=Backups._standardize_quotas(quotas.monthly), # type: ignore
|
|
||||||
yearly=Backups._standardize_quotas(quotas.yearly), # type: ignore
|
|
||||||
)
|
|
||||||
)
|
|
||||||
# do not prune all autosnaps right away, this will be done by an async task
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def prune_all_autosnaps() -> None:
|
|
||||||
for service in get_all_services():
|
|
||||||
Backups._prune_auto_snaps(service)
|
|
||||||
|
|
||||||
# Restoring
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _ensure_queued_restore_job(service, snapshot) -> Job:
|
|
||||||
job = get_restore_job(service)
|
|
||||||
if job is None:
|
|
||||||
job = add_restore_job(snapshot)
|
|
||||||
|
|
||||||
Jobs.update(job, status=JobStatus.CREATED)
|
|
||||||
return job
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _inplace_restore(
|
|
||||||
service: Service,
|
|
||||||
snapshot: Snapshot,
|
|
||||||
job: Job,
|
|
||||||
) -> None:
|
|
||||||
Jobs.update(
|
|
||||||
job, status=JobStatus.CREATED, status_text="Waiting for pre-restore backup"
|
|
||||||
)
|
|
||||||
failsafe_snapshot = Backups.back_up(service, BackupReason.PRE_RESTORE)
|
|
||||||
|
|
||||||
Jobs.update(
|
|
||||||
job, status=JobStatus.RUNNING, status_text=f"Restoring from {snapshot.id}"
|
|
||||||
)
|
|
||||||
try:
|
|
||||||
Backups._restore_service_from_snapshot(
|
|
||||||
service,
|
|
||||||
snapshot.id,
|
|
||||||
verify=False,
|
|
||||||
)
|
|
||||||
except Exception as error:
|
|
||||||
Jobs.update(
|
|
||||||
job,
|
|
||||||
status=JobStatus.ERROR,
|
|
||||||
status_text=f"Restore failed with {str(error)}, reverting to {failsafe_snapshot.id}",
|
|
||||||
)
|
|
||||||
Backups._restore_service_from_snapshot(
|
|
||||||
service, failsafe_snapshot.id, verify=False
|
|
||||||
)
|
|
||||||
Jobs.update(
|
|
||||||
job,
|
|
||||||
status=JobStatus.ERROR,
|
|
||||||
status_text=f"Restore failed with {str(error)}, reverted to {failsafe_snapshot.id}",
|
|
||||||
)
|
|
||||||
raise error
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def restore_snapshot(
|
|
||||||
snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
|
|
||||||
) -> None:
|
|
||||||
"""Restores a snapshot to its original service using the given strategy"""
|
|
||||||
service = get_service_by_id(snapshot.service_name)
|
|
||||||
if service is None:
|
|
||||||
raise ValueError(
|
|
||||||
f"snapshot has a nonexistent service: {snapshot.service_name}"
|
|
||||||
)
|
|
||||||
job = Backups._ensure_queued_restore_job(service, snapshot)
|
|
||||||
|
|
||||||
try:
|
|
||||||
Backups._assert_restorable(snapshot)
|
|
||||||
Jobs.update(
|
|
||||||
job, status=JobStatus.RUNNING, status_text="Stopping the service"
|
|
||||||
)
|
|
||||||
with StoppedService(service):
|
|
||||||
Backups.assert_dead(service)
|
|
||||||
if strategy == RestoreStrategy.INPLACE:
|
|
||||||
Backups._inplace_restore(service, snapshot, job)
|
|
||||||
else: # verify_before_download is our default
|
|
||||||
Jobs.update(
|
|
||||||
job,
|
|
||||||
status=JobStatus.RUNNING,
|
|
||||||
status_text=f"Restoring from {snapshot.id}",
|
|
||||||
)
|
|
||||||
Backups._restore_service_from_snapshot(
|
|
||||||
service, snapshot.id, verify=True
|
|
||||||
)
|
|
||||||
|
|
||||||
service.post_restore()
|
|
||||||
Jobs.update(
|
|
||||||
job,
|
|
||||||
status=JobStatus.RUNNING,
|
|
||||||
progress=90,
|
|
||||||
status_text="Restarting the service",
|
|
||||||
)
|
|
||||||
|
|
||||||
except Exception as error:
|
|
||||||
Jobs.update(job, status=JobStatus.ERROR, status_text=str(error))
|
|
||||||
raise error
|
|
||||||
|
|
||||||
Jobs.update(job, status=JobStatus.FINISHED)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _assert_restorable(
|
|
||||||
snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
|
|
||||||
) -> None:
|
|
||||||
service = get_service_by_id(snapshot.service_name)
|
|
||||||
if service is None:
|
|
||||||
raise ValueError(
|
|
||||||
f"snapshot has a nonexistent service: {snapshot.service_name}"
|
|
||||||
)
|
|
||||||
|
|
||||||
restored_snap_size = Backups.snapshot_restored_size(snapshot.id)
|
|
||||||
|
|
||||||
if strategy == RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE:
|
|
||||||
needed_space = restored_snap_size
|
|
||||||
elif strategy == RestoreStrategy.INPLACE:
|
|
||||||
needed_space = restored_snap_size - service.get_storage_usage()
|
|
||||||
else:
|
|
||||||
raise NotImplementedError(
|
|
||||||
"""
|
|
||||||
We do not know if there is enough space for restoration because
|
|
||||||
there is some novel restore strategy used!
|
|
||||||
This is a developer's fault, open an issue please
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
available_space = Backups.space_usable_for_service(service)
|
|
||||||
if needed_space > available_space:
|
|
||||||
raise ValueError(
|
|
||||||
f"we only have {available_space} bytes "
|
|
||||||
f"but snapshot needs {needed_space}"
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _restore_service_from_snapshot(
|
|
||||||
service: Service,
|
|
||||||
snapshot_id: str,
|
|
||||||
verify=True,
|
|
||||||
) -> None:
|
|
||||||
folders = service.get_folders()
|
|
||||||
|
|
||||||
Backups.provider().backupper.restore_from_backup(
|
|
||||||
snapshot_id,
|
|
||||||
folders,
|
|
||||||
verify=verify,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Snapshots
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_snapshots(service: Service) -> List[Snapshot]:
|
|
||||||
"""Returns all snapshots for a given service"""
|
|
||||||
snapshots = Backups.get_all_snapshots()
|
|
||||||
service_id = service.get_id()
|
|
||||||
return list(
|
|
||||||
filter(
|
|
||||||
lambda snap: snap.service_name == service_id,
|
|
||||||
snapshots,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_all_snapshots() -> List[Snapshot]:
|
|
||||||
"""Returns all snapshots"""
|
|
||||||
# When we refresh our cache:
|
|
||||||
# 1. Manually
|
|
||||||
# 2. On timer
|
|
||||||
# 3. On new snapshot
|
|
||||||
# 4. On snapshot deletion
|
|
||||||
|
|
||||||
return Storage.get_cached_snapshots()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
|
|
||||||
"""Returns a backup snapshot by its id"""
|
|
||||||
snap = Storage.get_cached_snapshot_by_id(snapshot_id)
|
|
||||||
if snap is not None:
|
|
||||||
return snap
|
|
||||||
|
|
||||||
# Possibly our cache entry got invalidated, let's try one more time
|
|
||||||
Backups.force_snapshot_cache_reload()
|
|
||||||
snap = Storage.get_cached_snapshot_by_id(snapshot_id)
|
|
||||||
|
|
||||||
return snap
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def forget_snapshots(snapshots: List[Snapshot]) -> None:
|
|
||||||
"""
|
|
||||||
Deletes a batch of snapshots from the repo and syncs cache
|
|
||||||
Optimized
|
|
||||||
"""
|
|
||||||
ids = [snapshot.id for snapshot in snapshots]
|
|
||||||
Backups.provider().backupper.forget_snapshots(ids)
|
|
||||||
|
|
||||||
Backups.force_snapshot_cache_reload()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def forget_snapshot(snapshot: Snapshot) -> None:
|
|
||||||
"""Deletes a snapshot from the repo and from cache"""
|
|
||||||
Backups.forget_snapshots([snapshot])
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def forget_all_snapshots():
|
|
||||||
"""
|
|
||||||
Mark all snapshots we have made for deletion and make them inaccessible
|
|
||||||
(this is done by cloud, we only issue a command)
|
|
||||||
"""
|
|
||||||
Backups.forget_snapshots(Backups.get_all_snapshots())
|
|
||||||
|
|
||||||
@staticmethod
def force_snapshot_cache_reload() -> None:
    """Drop the snapshot cache and repopulate it from the repository.

    This may be an expensive operation — the user pays for the API
    calls — so use it wisely.
    """
    fresh_snapshots = Backups.provider().backupper.get_snapshots()
    Storage.invalidate_snapshot_storage()
    for snap in fresh_snapshots:
        Storage.cache_snapshot(snap)
|
|
||||||
|
|
||||||
@staticmethod
def snapshot_restored_size(snapshot_id: str) -> int:
    """Return the size the snapshot would occupy when restored."""
    backupper = Backups.provider().backupper
    return backupper.restored_size(snapshot_id)
|
|
||||||
|
|
||||||
@staticmethod
def _on_new_snapshot_created(service_id: str, snapshot: Snapshot) -> None:
    """Bookkeeping for a snapshot that has just been made."""
    # Record a non-expiring timestamp of the last backup for the service.
    Storage.store_last_timestamp(service_id, snapshot)
    Backups.force_snapshot_cache_reload()
|
|
||||||
|
|
||||||
# Autobackup
|
|
||||||
|
|
||||||
@staticmethod
def autobackup_period_minutes() -> Optional[int]:
    """Return the autobackup period; None means autobackup is disabled."""
    return Storage.autobackup_period_minutes()
|
|
||||||
|
|
||||||
@staticmethod
def set_autobackup_period_minutes(minutes: int) -> None:
    """Set the automatic backup period.

    Zero and negative values disable autobackup entirely.
    A positive value may trigger a backup very soon if some
    services have not been backed up yet.
    """
    if minutes > 0:
        Storage.store_autobackup_period_minutes(minutes)
    else:
        Backups.disable_all_autobackup()
|
|
||||||
|
|
||||||
@staticmethod
def disable_all_autobackup() -> None:
    """Turn off all automatic backing up.

    Per-service settings are left untouched.
    """
    Storage.delete_backup_period()
|
|
||||||
|
|
||||||
@staticmethod
def is_time_to_backup(time: datetime) -> bool:
    """Time validator for the huey cron scheduler of automatic backups."""
    return len(Backups.services_to_back_up(time)) > 0
|
|
||||||
|
|
||||||
@staticmethod
def services_to_back_up(time: datetime) -> List[Service]:
    """Return the services that should be backed up at the given time."""
    due = []
    for service in get_all_services():
        if Backups.is_time_to_backup_service(service, time):
            due.append(service)
    return due
|
|
||||||
|
|
||||||
@staticmethod
def get_last_backed_up(service: Service) -> Optional[datetime]:
    """Get a timezone-aware time of the last backup of a service."""
    service_id = service.get_id()
    return Storage.get_last_backup_time(service_id)
|
|
||||||
|
|
||||||
@staticmethod
def get_last_backup_error_time(service: Service) -> Optional[datetime]:
    """Get a timezone-aware time of the last *failed* backup of a service.

    Returns None when no failed backup job is recorded for the service.
    """
    job = get_backup_fail(service)
    if job is not None:
        datetime_created = job.created_at
        if datetime_created.tzinfo is None:
            # Naive timestamp: assume it is in localtime, shift to UTC
            # using the host's current GMT offset, then re-stamp as UTC.
            offset = timedelta(seconds=time.localtime().tm_gmtoff)
            datetime_created = datetime_created - offset
            return datetime.combine(
                datetime_created.date(), datetime_created.time(), timezone.utc
            )
        # Already timezone-aware: return as-is.
        return datetime_created
    return None
|
|
||||||
|
|
||||||
@staticmethod
def is_time_to_backup_service(service: Service, time: datetime) -> bool:
    """Returns True if it is time to back up a service"""
    period = Backups.autobackup_period_minutes()
    if period is None:
        # Autobackup is disabled globally.
        return False

    if not service.is_enabled():
        return False
    if not service.can_be_backed_up():
        return False

    last_error = Backups.get_last_backup_error_time(service)

    if last_error is not None:
        # Back off after a failure until the failed job expires.
        if time < last_error + timedelta(seconds=AUTOBACKUP_JOB_EXPIRATION_SECONDS):
            return False

    last_backup = Backups.get_last_backed_up(service)

    # Queue a backup immediately if there are no previous backups
    if last_backup is None:
        return True

    if time > last_backup + timedelta(minutes=period):
        return True

    return False
|
|
||||||
|
|
||||||
# Helpers
|
|
||||||
|
|
||||||
@staticmethod
def space_usable_for_service(service: Service) -> int:
    """Return the free space (bytes) on the volume hosting the service.

    Raises ValueError for a service with no folders allocated.
    """
    folders = service.get_folders()
    if not folders:
        raise ValueError("unallocated service", service.get_id())

    # We assume all folders of one service live at the same volume
    stats = statvfs(folders[0])
    return stats.f_frsize * stats.f_bavail
|
|
||||||
|
|
||||||
@staticmethod
def set_localfile_repo(file_path: str):
    """Used by tests to register a local folder as the backup repo."""
    # pylint: disable-next=invalid-name
    ProviderClass = get_provider(BackupProviderEnum.FILE)
    local_provider = ProviderClass(
        login="",
        key="",
        location=file_path,
        repo_id="",
    )
    Storage.store_provider(local_provider)
|
|
||||||
|
|
||||||
@staticmethod
def assert_dead(service: Service):
    """Raise NotDeadError unless the service is stopped.

    Only a stopped (inactive or failed) service can be safely
    restored from a snapshot.
    """
    acceptable_statuses = [
        ServiceStatus.INACTIVE,
        ServiceStatus.FAILED,
    ]
    if service.get_status() not in acceptable_statuses:
        raise NotDeadError(service)
|
|
|
@ -1,73 +0,0 @@
|
||||||
from abc import ABC, abstractmethod
|
|
||||||
from typing import List
|
|
||||||
|
|
||||||
from selfprivacy_api.models.backup.snapshot import Snapshot
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import BackupReason
|
|
||||||
|
|
||||||
|
|
||||||
class AbstractBackupper(ABC):
    """Abstract interface for backuppers (backup backend drivers).

    Concrete implementations talk to an actual repository; every
    repository-touching method here raises NotImplementedError.
    """

    # flake8: noqa: B027
    # The empty non-abstract __init__ is intentional: subclasses may
    # call super().__init__() unconditionally.
    def __init__(self) -> None:
        pass

    @abstractmethod
    def is_initted(self) -> bool:
        """Returns true if the repository is initted"""
        raise NotImplementedError

    @abstractmethod
    def set_creds(self, account: str, key: str, repo: str) -> None:
        """Set the credentials for the backupper"""
        raise NotImplementedError

    @abstractmethod
    def start_backup(
        self,
        folders: List[str],
        service_name: str,
        reason: BackupReason = BackupReason.EXPLICIT,
    ) -> Snapshot:
        """Start a backup of the given folders"""
        raise NotImplementedError

    @abstractmethod
    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        raise NotImplementedError

    @abstractmethod
    def init(self) -> None:
        """Initialize the repository"""
        raise NotImplementedError

    @abstractmethod
    def erase_repo(self) -> None:
        """Completely empties the remote"""
        raise NotImplementedError

    @abstractmethod
    def restore_from_backup(
        self,
        snapshot_id: str,
        folders: List[str],
        verify=True,
    ) -> None:
        """Restore a target folder using a snapshot"""
        raise NotImplementedError

    @abstractmethod
    def restored_size(self, snapshot_id: str) -> int:
        """Get the size of the restored snapshot"""
        raise NotImplementedError

    @abstractmethod
    def forget_snapshot(self, snapshot_id) -> None:
        """Forget a snapshot"""
        raise NotImplementedError

    @abstractmethod
    def forget_snapshots(self, snapshot_ids: List[str]) -> None:
        """Maybe optimized deletion of a batch of snapshots, just cycling if unsupported"""
        raise NotImplementedError
|
|
|
@ -1,45 +0,0 @@
|
||||||
from typing import List
|
|
||||||
|
|
||||||
from selfprivacy_api.models.backup.snapshot import Snapshot
|
|
||||||
from selfprivacy_api.backup.backuppers import AbstractBackupper
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import BackupReason
|
|
||||||
|
|
||||||
|
|
||||||
class NoneBackupper(AbstractBackupper):
    """A no-op backupper used when no backup provider is configured.

    Read-only queries report an uninitialized, empty repository;
    any operation that would actually touch a repository raises
    NotImplementedError.
    """

    def is_initted(self, repo_name: str = "") -> bool:
        # NOTE(review): `repo_name` is not part of AbstractBackupper's
        # signature; presumably kept for backward compatibility — confirm
        # against callers.
        return False

    def set_creds(self, account: str, key: str, repo: str):
        pass

    def start_backup(
        self, folders: List[str], tag: str, reason: BackupReason = BackupReason.EXPLICIT
    ):
        # NOTE(review): second parameter is named `tag` here but
        # `service_name` in the ABC; positional callers are unaffected,
        # keyword callers would break — confirm before renaming.
        raise NotImplementedError

    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        return []

    def init(self):
        raise NotImplementedError

    def erase_repo(self) -> None:
        """Completely empties the remote"""
        # this one is already empty
        pass

    def restore_from_backup(self, snapshot_id: str, folders: List[str], verify=True):
        """Restore a target folder using a snapshot"""
        raise NotImplementedError

    def restored_size(self, snapshot_id: str) -> int:
        raise NotImplementedError

    def forget_snapshot(self, snapshot_id):
        raise NotImplementedError("forget_snapshot")

    def forget_snapshots(self, snapshots):
        raise NotImplementedError("forget_snapshots")
|
|
|
@ -1,554 +0,0 @@
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import json
|
|
||||||
import datetime
|
|
||||||
import tempfile
|
|
||||||
|
|
||||||
from typing import List, Optional, TypeVar, Callable
|
|
||||||
from collections.abc import Iterable
|
|
||||||
from json.decoder import JSONDecodeError
|
|
||||||
from os.path import exists, join
|
|
||||||
from os import mkdir
|
|
||||||
from shutil import rmtree
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import BackupReason
|
|
||||||
from selfprivacy_api.backup.util import output_yielder, sync
|
|
||||||
from selfprivacy_api.backup.backuppers import AbstractBackupper
|
|
||||||
from selfprivacy_api.models.backup.snapshot import Snapshot
|
|
||||||
from selfprivacy_api.backup.jobs import get_backup_job
|
|
||||||
from selfprivacy_api.services import get_service_by_id
|
|
||||||
from selfprivacy_api.jobs import Jobs, JobStatus, Job
|
|
||||||
|
|
||||||
from selfprivacy_api.backup.local_secret import LocalBackupSecret
|
|
||||||
|
|
||||||
# Restic may report either a short or a full snapshot id; we normalize to this length.
SHORT_ID_LEN = 8

T = TypeVar("T", bound=Callable)


def unlocked_repo(func: T) -> T:
    """Decorator: unlock the repo and retry once if it appears to be locked."""

    def inner(self: ResticBackupper, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as error:
            if "unable to create lock" not in str(error):
                raise error
            # Stale lock: clear it and retry exactly once.
            self.unlock()
            return func(self, *args, **kwargs)

    # Above, we manually guarantee that the type returned is compatible.
    return inner  # type: ignore
|
|
||||||
|
|
||||||
|
|
||||||
class ResticBackupper(AbstractBackupper):
    """AbstractBackupper implementation that shells out to the `restic`
    CLI, reaching the storage backend through `rclone serve restic --stdio`.

    All subprocess invocations use argument lists with shell=False.
    """

    def __init__(self, login_flag: str, key_flag: str, storage_type: str) -> None:
        # login_flag/key_flag: rclone CLI flag names used to pass the
        # account and key for the chosen backend; storage_type is the
        # rclone remote prefix the repo path is appended to.
        self.login_flag = login_flag
        self.key_flag = key_flag
        self.storage_type = storage_type
        self.account = ""
        self.key = ""
        self.repo = ""
        super().__init__()

    def set_creds(self, account: str, key: str, repo: str) -> None:
        """Set the credentials for the backupper."""
        self.account = account
        self.key = key
        self.repo = repo

    def restic_repo(self) -> str:
        """Repository URL in the form restic expects (proxied via rclone)."""
        # https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#other-services-via-rclone
        # https://forum.rclone.org/t/can-rclone-be-run-solely-with-command-line-options-no-config-no-env-vars/6314/5
        return f"rclone:{self.rclone_repo()}"

    def rclone_repo(self) -> str:
        """Repository location in the form rclone expects."""
        return f"{self.storage_type}{self.repo}"

    def rclone_args(self):
        """Value for restic's -o option telling it how to invoke rclone."""
        return "rclone.args=serve restic --stdio " + " ".join(
            self.backend_rclone_args()
        )

    def backend_rclone_args(self) -> list[str]:
        """Credential flags forwarded to the rclone backend, if configured."""
        args = []
        if self.account != "":
            acc_args = [self.login_flag, self.account]
            args.extend(acc_args)
        if self.key != "":
            key_args = [self.key_flag, self.key]
            args.extend(key_args)
        return args

    def _password_command(self):
        # restic reads the repository password from this command's stdout.
        return f"echo {LocalBackupSecret.get()}"

    def restic_command(self, *args, tags: Optional[List[str]] = None) -> List[str]:
        """
        Construct a restic command against the currently configured repo.

        Can support [nested] arrays as arguments, will flatten them into
        the final command.
        """
        if tags is None:
            tags = []

        command = [
            "restic",
            "-o",
            self.rclone_args(),
            "-r",
            self.restic_repo(),
            "--password-command",
            self._password_command(),
        ]
        if tags != []:
            for tag in tags:
                command.extend(
                    [
                        "--tag",
                        tag,
                    ]
                )
        if args:
            command.extend(ResticBackupper.__flatten_list(args))
        return command

    def erase_repo(self) -> None:
        """Fully erases repo on remote, can be reinitted again"""
        command = [
            "rclone",
            "purge",
            self.rclone_repo(),
        ]
        backend_args = self.backend_rclone_args()
        if backend_args:
            command.extend(backend_args)

        with subprocess.Popen(command, stdout=subprocess.PIPE, shell=False) as handle:
            output = handle.communicate()[0].decode("utf-8")
            if handle.returncode != 0:
                raise ValueError(
                    "purge exited with errorcode",
                    handle.returncode,
                    ":",
                    output,
                )

    @staticmethod
    def __flatten_list(list_to_flatten):
        """string-aware list flattener"""
        result = []
        for item in list_to_flatten:
            # Strings are iterable but must be kept whole.
            if isinstance(item, Iterable) and not isinstance(item, str):
                result.extend(ResticBackupper.__flatten_list(item))
                continue
            result.append(item)
        return result

    @staticmethod
    def _run_backup_command(
        backup_command: List[str], job: Optional[Job]
    ) -> List[dict]:
        """Run the backup command and handle its (json-line) output.

        Raises ValueError if restic reported any errors.
        """
        messages = []
        output = []
        restic_reported_error = False

        for raw_message in output_yielder(backup_command):
            if "ERROR:" in raw_message:
                restic_reported_error = True
            output.append(raw_message)

            # After the first error, lines may not be json — stop parsing.
            if not restic_reported_error:
                message = ResticBackupper.parse_message(raw_message, job)
                messages.append(message)

        if restic_reported_error:
            raise ValueError(
                "Restic returned error(s): ",
                output,
            )

        return messages

    @staticmethod
    def _replace_in_array(array: List[str], target, replacement) -> None:
        # In-place substring replacement across all elements; no-op for "".
        if target == "":
            return

        for i, value in enumerate(array):
            if target in value:
                array[i] = array[i].replace(target, replacement)

    def _censor_command(self, command: List[str]) -> List[str]:
        """Scrub secrets from a command line before embedding it in an error."""
        result = command.copy()
        ResticBackupper._replace_in_array(result, self.key, "CENSORED")
        ResticBackupper._replace_in_array(result, LocalBackupSecret.get(), "CENSORED")
        return result

    @staticmethod
    def _get_backup_job(service_name: str) -> Optional[Job]:
        """Look up the pending backup job for a service, if any."""
        service = get_service_by_id(service_name)
        if service is None:
            raise ValueError("No service with id ", service_name)

        return get_backup_job(service)

    @unlocked_repo
    def start_backup(
        self,
        folders: List[str],
        service_name: str,
        reason: BackupReason = BackupReason.EXPLICIT,
    ) -> Snapshot:
        """
        Start backup with restic
        """
        assert len(folders) != 0

        job = ResticBackupper._get_backup_job(service_name)

        # Tag order matters: service name first, then reason (see get_snapshots).
        tags = [service_name, reason.value]
        backup_command = self.restic_command(
            "backup",
            "--json",
            folders,
            tags=tags,
        )

        try:
            messages = ResticBackupper._run_backup_command(backup_command, job)

            id = ResticBackupper._snapshot_id_from_backup_messages(messages)
            return Snapshot(
                created_at=datetime.datetime.now(datetime.timezone.utc),
                id=id,
                service_name=service_name,
                reason=reason,
            )

        except ValueError as error:
            raise ValueError(
                "Could not create a snapshot: ",
                str(error),
                "command: ",
                self._censor_command(backup_command),
            ) from error

    @staticmethod
    def _snapshot_id_from_backup_messages(messages) -> str:
        for message in messages:
            if message["message_type"] == "summary":
                # There is a discrepancy between versions of restic/rclone
                # Some report short_id in this field and some full
                return message["snapshot_id"][0:SHORT_ID_LEN]

        raise ValueError("no summary message in restic json output")

    @staticmethod
    def parse_message(raw_message_line: str, job: Optional[Job] = None) -> dict:
        """Parse one json line of restic output, updating job progress."""
        message = ResticBackupper.parse_json_output(raw_message_line)
        if not isinstance(message, dict):
            raise ValueError("we have too many messages on one line?")
        if message["message_type"] == "status":
            if job is not None:  # only update status if we run under some job
                Jobs.update(
                    job,
                    JobStatus.RUNNING,
                    progress=int(message["percent_done"] * 100),
                )
        return message

    def init(self) -> None:
        """Initialize the repository, raising ValueError on failure."""
        init_command = self.restic_command(
            "init",
        )
        with subprocess.Popen(
            init_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as process_handle:
            output = process_handle.communicate()[0].decode("utf-8")
            if "created restic repository" not in output:
                raise ValueError("cannot init a repo: " + output)

    @unlocked_repo
    def is_initted(self) -> bool:
        """True if the repo exists and passes `restic check`."""
        command = self.restic_command(
            "check",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            shell=False,
            stderr=subprocess.STDOUT,
        ) as handle:
            output = handle.communicate()[0].decode("utf-8")
            if handle.returncode != 0:
                # Propagate lock errors so @unlocked_repo can retry.
                if "unable to create lock" in output:
                    raise ValueError("Stale lock detected: ", output)
                return False
            return True

    def unlock(self) -> None:
        """Remove stale locks."""
        command = self.restic_command(
            "unlock",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            shell=False,
            stderr=subprocess.STDOUT,
        ) as handle:
            # communication forces to complete and for returncode to get defined
            output = handle.communicate()[0].decode("utf-8")
            if handle.returncode != 0:
                raise ValueError("cannot unlock the backup repository: ", output)

    def lock(self) -> None:
        """
        Introduce a stale lock.
        Mainly for testing purposes.
        Double lock is supposed to fail
        """
        command = self.restic_command(
            "check",
        )

        # Sample `restic check` output this parser relies on:
        # using temporary cache in /run/user/1000/restic-check-cache-817079729
        # repository 9639c714 opened (repository version 2) successfully, password is correct
        # created new cache in /run/user/1000/restic-check-cache-817079729
        # create exclusive lock for repository
        # load indexes
        # check all packs
        # check snapshots, trees and blobs
        # [0:00] 100.00%  1 / 1 snapshots
        # no errors were found

        try:
            for line in output_yielder(command):
                # Abandon `check` mid-run so its lock goes stale.
                if "indexes" in line:
                    break
                if "unable" in line:
                    raise ValueError(line)
        except Exception as error:
            raise ValueError("could not lock repository") from error

    @unlocked_repo
    def restored_size(self, snapshot_id: str) -> int:
        """
        Size of a snapshot
        """
        command = self.restic_command(
            "stats",
            snapshot_id,
            "--json",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=False,
        ) as handle:
            output = handle.communicate()[0].decode("utf-8")
            try:
                parsed_output = ResticBackupper.parse_json_output(output)
                return parsed_output["total_size"]
            except ValueError as error:
                raise ValueError("cannot restore a snapshot: " + output) from error

    @unlocked_repo
    def restore_from_backup(
        self,
        snapshot_id,
        folders: List[str],
        verify=True,
    ) -> None:
        """
        Restore from backup with restic
        """
        if folders is None or folders == []:
            raise ValueError("cannot restore without knowing where to!")

        with tempfile.TemporaryDirectory() as temp_dir:
            if verify:
                # Restore to a scratch dir first, then sync into place.
                self._raw_verified_restore(snapshot_id, target=temp_dir)
                snapshot_root = temp_dir
                for folder in folders:
                    src = join(snapshot_root, folder.strip("/"))
                    if not exists(src):
                        raise ValueError(
                            f"No such path: {src}. We tried to find {folder}"
                        )
                    dst = folder
                    sync(src, dst)

            else:  # attempting inplace restore
                for folder in folders:
                    rmtree(folder)
                    mkdir(folder)
                self._raw_verified_restore(snapshot_id, target="/")
                return

    def _raw_verified_restore(self, snapshot_id, target="/"):
        """barebones restic restore"""
        restore_command = self.restic_command(
            "restore", snapshot_id, "--target", target, "--verify"
        )

        with subprocess.Popen(
            restore_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=False,
        ) as handle:
            # for some reason restore does not support
            # nice reporting of progress via json
            output = handle.communicate()[0].decode("utf-8")
            if "restoring" not in output:
                raise ValueError("cannot restore a snapshot: " + output)

            assert (
                handle.returncode is not None
            )  # none should be impossible after communicate
            if handle.returncode != 0:
                raise ValueError(
                    "restore exited with errorcode",
                    handle.returncode,
                    ":",
                    output,
                )

    def forget_snapshot(self, snapshot_id: str) -> None:
        """Forget a single snapshot (delegates to the batch variant)."""
        self.forget_snapshots([snapshot_id])

    @unlocked_repo
    def forget_snapshots(self, snapshot_ids: List[str]) -> None:
        # in case the backupper program supports batching, otherwise implement it by cycling
        forget_command = self.restic_command(
            "forget",
            [snapshot_ids],
            # TODO: prune should be done in a separate process
            "--prune",
        )

        with subprocess.Popen(
            forget_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
        ) as handle:
            # for some reason restore does not support
            # nice reporting of progress via json
            output, err = [
                string.decode(
                    "utf-8",
                )
                for string in handle.communicate()
            ]

            if "no matching ID found" in err:
                raise ValueError(
                    "trying to delete, but no such snapshot(s): ", snapshot_ids
                )

            assert (
                handle.returncode is not None
            )  # none should be impossible after communicate
            if handle.returncode != 0:
                raise ValueError(
                    "forget exited with errorcode", handle.returncode, ":", output, err
                )

    def _load_snapshots(self) -> object:
        """
        Load list of snapshots from repository
        raises Value Error if repo does not exist
        """
        listing_command = self.restic_command(
            "snapshots",
            "--json",
        )

        with subprocess.Popen(
            listing_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as backup_listing_process_descriptor:
            output = backup_listing_process_descriptor.communicate()[0].decode("utf-8")

        if "Is there a repository at the following location?" in output:
            raise ValueError("No repository! : " + output)
        try:
            return ResticBackupper.parse_json_output(output)
        except ValueError as error:
            raise ValueError("Cannot load snapshots: ", output) from error

    @unlocked_repo
    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        snapshots = []

        for restic_snapshot in self._load_snapshots():
            # Compatibility with previous snaps:
            if len(restic_snapshot["tags"]) == 1:
                reason = BackupReason.EXPLICIT
            else:
                reason = restic_snapshot["tags"][1]

            snapshot = Snapshot(
                id=restic_snapshot["short_id"],
                created_at=restic_snapshot["time"],
                service_name=restic_snapshot["tags"][0],
                reason=reason,
            )

            snapshots.append(snapshot)
        return snapshots

    @staticmethod
    def parse_json_output(output: str) -> object:
        """Parse restic output: one json value, or a list of json lines."""
        starting_index = ResticBackupper.json_start(output)

        if starting_index == -1:
            raise ValueError("There is no json in the restic output: " + output)

        truncated_output = output[starting_index:]
        json_messages = truncated_output.splitlines()
        if len(json_messages) == 1:
            try:
                return json.loads(truncated_output)
            except JSONDecodeError as error:
                raise ValueError(
                    "There is no json in the restic output : " + output
                ) from error

        result_array = []
        for message in json_messages:
            result_array.append(json.loads(message))
        return result_array

    @staticmethod
    def json_start(output: str) -> int:
        """Index of the first json value in output, or -1 if none."""
        indices = [
            output.find("["),
            output.find("{"),
        ]
        indices = [x for x in indices if x != -1]

        if indices == []:
            return -1
        return min(indices)

    @staticmethod
    def has_json(output: str) -> bool:
        if ResticBackupper.json_start(output) == -1:
            return False
        return True
|
|
|
@ -1,115 +0,0 @@
|
||||||
from typing import Optional, List
|
|
||||||
|
|
||||||
from selfprivacy_api.models.backup.snapshot import Snapshot
|
|
||||||
from selfprivacy_api.jobs import Jobs, Job, JobStatus
|
|
||||||
from selfprivacy_api.services.service import Service
|
|
||||||
from selfprivacy_api.services import get_service_by_id
|
|
||||||
|
|
||||||
|
|
||||||
def job_type_prefix(service: Service) -> str:
    """Common job type_id prefix for every job of the given service."""
    service_id = service.get_id()
    return f"services.{service_id}"
|
|
||||||
|
|
||||||
|
|
||||||
def backup_job_type(service: Service) -> str:
    """type_id of backup jobs for the given service."""
    return job_type_prefix(service) + ".backup"
|
|
||||||
|
|
||||||
|
|
||||||
def autobackup_job_type() -> str:
    """type_id of the scheduled automatic backup job."""
    return "backups.autobackup"
|
|
||||||
|
|
||||||
|
|
||||||
def restore_job_type(service: Service) -> str:
    """type_id of restore jobs for the given service."""
    return job_type_prefix(service) + ".restore"
|
|
||||||
|
|
||||||
|
|
||||||
def get_jobs_by_service(service: Service) -> List[Job]:
    """Return all unfinished (created or running) jobs of the service."""
    prefix = job_type_prefix(service)
    active_statuses = [JobStatus.CREATED, JobStatus.RUNNING]
    return [
        job
        for job in Jobs.get_jobs()
        if job.type_id.startswith(prefix) and job.status in active_statuses
    ]
|
|
||||||
|
|
||||||
|
|
||||||
def is_something_running_for(service: Service) -> bool:
    """True if the service currently has a job in the RUNNING state."""
    return any(
        job.status == JobStatus.RUNNING for job in get_jobs_by_service(service)
    )
|
|
||||||
|
|
||||||
|
|
||||||
def add_autobackup_job(services: List[Service]) -> Job:
    """Register a job for an automatic backup of the given services."""
    pretty_service_list: str = ", ".join(
        service.get_display_name() for service in services
    )
    return Jobs.add(
        type_id=autobackup_job_type(),
        name="Automatic backup",
        description=f"Scheduled backup for services: {pretty_service_list}",
    )
|
|
||||||
|
|
||||||
|
|
||||||
def add_backup_job(service: Service) -> Job:
|
|
||||||
if is_something_running_for(service):
|
|
||||||
message = (
|
|
||||||
f"Cannot start a backup of {service.get_id()}, another operation is running: "
|
|
||||||
+ get_jobs_by_service(service)[0].type_id
|
|
||||||
)
|
|
||||||
raise ValueError(message)
|
|
||||||
display_name = service.get_display_name()
|
|
||||||
job = Jobs.add(
|
|
||||||
type_id=backup_job_type(service),
|
|
||||||
name=f"Backup {display_name}",
|
|
||||||
description=f"Backing up {display_name}",
|
|
||||||
)
|
|
||||||
return job
|
|
||||||
|
|
||||||
|
|
||||||
def add_restore_job(snapshot: Snapshot) -> Job:
|
|
||||||
service = get_service_by_id(snapshot.service_name)
|
|
||||||
if service is None:
|
|
||||||
raise ValueError(f"no such service: {snapshot.service_name}")
|
|
||||||
if is_something_running_for(service):
|
|
||||||
message = (
|
|
||||||
f"Cannot start a restore of {service.get_id()}, another operation is running: "
|
|
||||||
+ get_jobs_by_service(service)[0].type_id
|
|
||||||
)
|
|
||||||
raise ValueError(message)
|
|
||||||
display_name = service.get_display_name()
|
|
||||||
job = Jobs.add(
|
|
||||||
type_id=restore_job_type(service),
|
|
||||||
name=f"Restore {display_name}",
|
|
||||||
description=f"restoring {display_name} from {snapshot.id}",
|
|
||||||
)
|
|
||||||
return job
|
|
||||||
|
|
||||||
|
|
||||||
def get_job_by_type(type_id: str) -> Optional[Job]:
|
|
||||||
for job in Jobs.get_jobs():
|
|
||||||
if job.type_id == type_id and job.status in [
|
|
||||||
JobStatus.CREATED,
|
|
||||||
JobStatus.RUNNING,
|
|
||||||
]:
|
|
||||||
return job
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def get_failed_job_by_type(type_id: str) -> Optional[Job]:
|
|
||||||
for job in Jobs.get_jobs():
|
|
||||||
if job.type_id == type_id and job.status == JobStatus.ERROR:
|
|
||||||
return job
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def get_backup_job(service: Service) -> Optional[Job]:
|
|
||||||
return get_job_by_type(backup_job_type(service))
|
|
||||||
|
|
||||||
|
|
||||||
def get_backup_fail(service: Service) -> Optional[Job]:
|
|
||||||
return get_failed_job_by_type(backup_job_type(service))
|
|
||||||
|
|
||||||
|
|
||||||
def get_restore_job(service: Service) -> Optional[Job]:
|
|
||||||
return get_job_by_type(restore_job_type(service))
|
|
|
@ -1,45 +0,0 @@
|
||||||
"""Handling of local secret used for encrypted backups.
|
|
||||||
Separated out for circular dependency reasons
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
import secrets
|
|
||||||
|
|
||||||
from selfprivacy_api.utils.redis_pool import RedisPool
|
|
||||||
|
|
||||||
|
|
||||||
REDIS_KEY = "backup:local_secret"
|
|
||||||
|
|
||||||
redis = RedisPool().get_connection()
|
|
||||||
|
|
||||||
|
|
||||||
class LocalBackupSecret:
    """Manages the locally stored encryption secret for backups."""

    @staticmethod
    def get() -> str:
        """A secret string which backblaze/other clouds do not know.
        Serves as encryption key.

        Generated lazily on first access.
        """
        if not LocalBackupSecret.exists():
            LocalBackupSecret.reset()
        return redis.get(REDIS_KEY)  # type: ignore

    @staticmethod
    def set(secret: str):
        """Store the given secret, overwriting any previous one."""
        redis.set(REDIS_KEY, secret)

    @staticmethod
    def reset():
        """Replace the secret with a freshly generated one."""
        LocalBackupSecret.set(LocalBackupSecret._generate())

    @staticmethod
    def _full_reset():
        """Delete the stored secret entirely."""
        redis.delete(REDIS_KEY)

    @staticmethod
    def exists() -> bool:
        """True if a secret is currently stored in redis."""
        return redis.exists(REDIS_KEY) == 1

    @staticmethod
    def _generate() -> str:
        """Produce a new cryptographically strong secret."""
        return secrets.token_urlsafe(256)
|
|
|
@ -1,31 +0,0 @@
|
||||||
from typing import Type
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.queries.providers import (
|
|
||||||
BackupProvider as BackupProviderEnum,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
|
|
||||||
|
|
||||||
from selfprivacy_api.backup.providers.backblaze import Backblaze
|
|
||||||
from selfprivacy_api.backup.providers.memory import InMemoryBackup
|
|
||||||
from selfprivacy_api.backup.providers.local_file import LocalFileBackup
|
|
||||||
from selfprivacy_api.backup.providers.none import NoBackups
|
|
||||||
|
|
||||||
# Registry mapping each BackupProvider enum value to its implementation class.
PROVIDER_MAPPING: dict[BackupProviderEnum, Type[AbstractBackupProvider]] = {
    BackupProviderEnum.BACKBLAZE: Backblaze,
    BackupProviderEnum.MEMORY: InMemoryBackup,
    BackupProviderEnum.FILE: LocalFileBackup,
    BackupProviderEnum.NONE: NoBackups,
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_provider(
    provider_type: BackupProviderEnum,
) -> Type[AbstractBackupProvider]:
    """Return the backup provider class registered for *provider_type*.

    Raises:
        LookupError: if no provider is registered for provider_type.
    """
    # EAFP: one dict lookup instead of `in PROVIDER_MAPPING.keys()` + index.
    try:
        return PROVIDER_MAPPING[provider_type]
    except KeyError:
        raise LookupError("could not look up provider", provider_type) from None
|
|
||||||
|
|
||||||
|
|
||||||
def get_kind(provider: AbstractBackupProvider) -> str:
    """Get the kind of the provider in the form of a string"""
    # The kind is the string value of the provider's BackupProvider enum.
    kind_enum = provider.name
    return kind_enum.value
|
|
|
@ -1,11 +0,0 @@
|
||||||
from .provider import AbstractBackupProvider
|
|
||||||
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
|
|
||||||
from selfprivacy_api.graphql.queries.providers import (
|
|
||||||
BackupProvider as BackupProviderEnum,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class Backblaze(AbstractBackupProvider):
    """Backup provider storing restic repositories in Backblaze B2."""

    # restic flags carrying the B2 credentials; ":b2:" is the repo location prefix
    backupper = ResticBackupper("--b2-account", "--b2-key", ":b2:")

    name = BackupProviderEnum.BACKBLAZE
|
|
|
@ -1,11 +0,0 @@
|
||||||
from .provider import AbstractBackupProvider
|
|
||||||
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
|
|
||||||
from selfprivacy_api.graphql.queries.providers import (
|
|
||||||
BackupProvider as BackupProviderEnum,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class LocalFileBackup(AbstractBackupProvider):
    """Backup provider storing restic repositories on the local filesystem."""

    # no credential flags needed; ":local:" is the repo location prefix
    backupper = ResticBackupper("", "", ":local:")

    name = BackupProviderEnum.FILE
|
|
|
@ -1,11 +0,0 @@
|
||||||
from .provider import AbstractBackupProvider
|
|
||||||
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
|
|
||||||
from selfprivacy_api.graphql.queries.providers import (
|
|
||||||
BackupProvider as BackupProviderEnum,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class InMemoryBackup(AbstractBackupProvider):
    """Backup provider using restic's in-memory repository backend."""

    # no credential flags needed; ":memory:" is the repo location prefix
    backupper = ResticBackupper("", "", ":memory:")

    name = BackupProviderEnum.MEMORY
|
|
|
@ -1,11 +0,0 @@
|
||||||
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
|
|
||||||
from selfprivacy_api.backup.backuppers.none_backupper import NoneBackupper
|
|
||||||
from selfprivacy_api.graphql.queries.providers import (
|
|
||||||
BackupProvider as BackupProviderEnum,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class NoBackups(AbstractBackupProvider):
    """Null provider: backups disabled, backed by NoneBackupper."""

    backupper = NoneBackupper()

    name = BackupProviderEnum.NONE
|
|
|
@ -1,25 +0,0 @@
|
||||||
"""
|
|
||||||
An abstract class for BackBlaze, S3 etc.
|
|
||||||
It assumes that while some providers are supported via restic/rclone, others
|
|
||||||
may require different backends
|
|
||||||
"""
|
|
||||||
from abc import ABC, abstractmethod
|
|
||||||
from selfprivacy_api.backup.backuppers import AbstractBackupper
|
|
||||||
from selfprivacy_api.graphql.queries.providers import (
|
|
||||||
BackupProvider as BackupProviderEnum,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class AbstractBackupProvider(ABC):
    """Base class for backup providers (B2, local file, memory, none).

    Concrete subclasses define `backupper` and `name` as class attributes.
    """

    # NOTE(review): `backupper` is a class-level attribute on subclasses, so
    # set_creds() in __init__ mutates state shared by every instance of that
    # subclass — confirm single-instance usage is intended.
    backupper: AbstractBackupper

    name: BackupProviderEnum

    def __init__(self, login="", key="", location="", repo_id=""):
        self.backupper.set_creds(login, key, location)
        self.login = login
        self.key = key
        self.location = location
        # We do not need to do anything with this one
        # Just remember in case the app forgets
        self.repo_id = repo_id
|
|
|
@ -1,198 +0,0 @@
|
||||||
"""
|
|
||||||
Module for storing backup related data in redis.
|
|
||||||
"""
|
|
||||||
from typing import List, Optional
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
from selfprivacy_api.models.backup.snapshot import Snapshot
|
|
||||||
from selfprivacy_api.models.backup.provider import BackupProviderModel
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import (
|
|
||||||
AutobackupQuotas,
|
|
||||||
_AutobackupQuotas,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.utils.redis_pool import RedisPool
|
|
||||||
from selfprivacy_api.utils.redis_model_storage import (
|
|
||||||
store_model_as_hash,
|
|
||||||
hash_as_model,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
|
|
||||||
from selfprivacy_api.backup.providers import get_kind
|
|
||||||
|
|
||||||
REDIS_SNAPSHOTS_PREFIX = "backups:snapshots:"
|
|
||||||
REDIS_LAST_BACKUP_PREFIX = "backups:last-backed-up:"
|
|
||||||
REDIS_INITTED_CACHE = "backups:repo_initted"
|
|
||||||
|
|
||||||
REDIS_PROVIDER_KEY = "backups:provider"
|
|
||||||
REDIS_AUTOBACKUP_PERIOD_KEY = "backups:autobackup_period"
|
|
||||||
|
|
||||||
REDIS_AUTOBACKUP_QUOTAS_KEY = "backups:autobackup_quotas_key"
|
|
||||||
|
|
||||||
redis = RedisPool().get_connection()
|
|
||||||
|
|
||||||
|
|
||||||
class Storage:
    """Static class for storing backup related data in redis"""

    @staticmethod
    def reset() -> None:
        """Deletes all backup related data from redis"""
        redis.delete(REDIS_PROVIDER_KEY)
        redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)
        redis.delete(REDIS_INITTED_CACHE)
        redis.delete(REDIS_AUTOBACKUP_QUOTAS_KEY)

        prefixes_to_clean = [
            REDIS_SNAPSHOTS_PREFIX,
            REDIS_LAST_BACKUP_PREFIX,
        ]

        for prefix in prefixes_to_clean:
            for key in redis.keys(prefix + "*"):
                redis.delete(key)

    @staticmethod
    def invalidate_snapshot_storage() -> None:
        """Deletes all cached snapshots from redis"""
        for key in redis.keys(REDIS_SNAPSHOTS_PREFIX + "*"):
            redis.delete(key)

    @staticmethod
    def __last_backup_key(service_id: str) -> str:
        # Redis key holding the last successful snapshot of a service.
        return REDIS_LAST_BACKUP_PREFIX + service_id

    @staticmethod
    def __snapshot_key(snapshot: Snapshot) -> str:
        # Redis key caching a snapshot's metadata, keyed by snapshot id.
        return REDIS_SNAPSHOTS_PREFIX + snapshot.id

    @staticmethod
    def get_last_backup_time(service_id: str) -> Optional[datetime]:
        """Returns last backup time for a service or None if it was never backed up"""
        key = Storage.__last_backup_key(service_id)
        if not redis.exists(key):
            return None

        snapshot = hash_as_model(redis, key, Snapshot)
        if not snapshot:
            return None
        return snapshot.created_at

    @staticmethod
    def store_last_timestamp(service_id: str, snapshot: Snapshot) -> None:
        """Stores last backup time for a service"""
        store_model_as_hash(
            redis,
            Storage.__last_backup_key(service_id),
            snapshot,
        )

    @staticmethod
    def cache_snapshot(snapshot: Snapshot) -> None:
        """Stores snapshot metadata in redis for caching purposes"""
        snapshot_key = Storage.__snapshot_key(snapshot)
        store_model_as_hash(redis, snapshot_key, snapshot)

    @staticmethod
    def delete_cached_snapshot(snapshot: Snapshot) -> None:
        """Deletes snapshot metadata from redis"""
        snapshot_key = Storage.__snapshot_key(snapshot)
        redis.delete(snapshot_key)

    @staticmethod
    def get_cached_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
        """Returns cached snapshot by id or None if it doesn't exist"""
        key = REDIS_SNAPSHOTS_PREFIX + snapshot_id
        if not redis.exists(key):
            return None
        return hash_as_model(redis, key, Snapshot)

    @staticmethod
    def get_cached_snapshots() -> List[Snapshot]:
        """Returns all cached snapshots stored in redis"""
        keys: list[str] = redis.keys(REDIS_SNAPSHOTS_PREFIX + "*")  # type: ignore
        result: list[Snapshot] = []

        # Entries that fail to parse back into a Snapshot are skipped silently.
        for key in keys:
            snapshot = hash_as_model(redis, key, Snapshot)
            if snapshot:
                result.append(snapshot)
        return result

    @staticmethod
    def autobackup_period_minutes() -> Optional[int]:
        """None means autobackup is disabled"""
        if not redis.exists(REDIS_AUTOBACKUP_PERIOD_KEY):
            return None
        return int(redis.get(REDIS_AUTOBACKUP_PERIOD_KEY))  # type: ignore

    @staticmethod
    def store_autobackup_period_minutes(minutes: int) -> None:
        """Set the new autobackup period in minutes"""
        redis.set(REDIS_AUTOBACKUP_PERIOD_KEY, minutes)

    @staticmethod
    def delete_backup_period() -> None:
        """Set the autobackup period to none, effectively disabling autobackup"""
        redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)

    @staticmethod
    def store_provider(provider: AbstractBackupProvider) -> None:
        """Stores backup provider auth data in redis"""
        model = BackupProviderModel(
            kind=get_kind(provider),
            login=provider.login,
            key=provider.key,
            location=provider.location,
            repo_id=provider.repo_id,
        )
        store_model_as_hash(redis, REDIS_PROVIDER_KEY, model)
        # Read-back verification: fail loudly if redis did not persist it.
        if Storage.load_provider() != model:
            raise IOError("could not store the provider model: ", model.dict)

    @staticmethod
    def load_provider() -> Optional[BackupProviderModel]:
        """Loads backup storage provider auth data from redis"""
        provider_model = hash_as_model(
            redis,
            REDIS_PROVIDER_KEY,
            BackupProviderModel,
        )
        return provider_model

    @staticmethod
    def has_init_mark() -> bool:
        """Returns True if the repository was initialized"""
        if redis.exists(REDIS_INITTED_CACHE):
            return True
        return False

    @staticmethod
    def mark_as_init():
        """Marks the repository as initialized"""
        redis.set(REDIS_INITTED_CACHE, 1)

    @staticmethod
    def mark_as_uninitted():
        """Removes the initialized mark from the repository"""
        redis.delete(REDIS_INITTED_CACHE)

    @staticmethod
    def set_autobackup_quotas(quotas: AutobackupQuotas) -> None:
        """Stores autobackup retention quotas in redis."""
        store_model_as_hash(redis, REDIS_AUTOBACKUP_QUOTAS_KEY, quotas.to_pydantic())

    @staticmethod
    def autobackup_quotas() -> AutobackupQuotas:
        """Loads quotas; defaults to unlimited (-1 everywhere) when unset."""
        quotas_model = hash_as_model(
            redis, REDIS_AUTOBACKUP_QUOTAS_KEY, _AutobackupQuotas
        )
        if quotas_model is None:
            unlimited_quotas = AutobackupQuotas(
                last=-1,
                daily=-1,
                weekly=-1,
                monthly=-1,
                yearly=-1,
            )
            return unlimited_quotas
        return AutobackupQuotas.from_pydantic(quotas_model)  # pylint: disable=no-member
|
|
|
@ -1,117 +0,0 @@
|
||||||
"""
|
|
||||||
The tasks module contains the worker tasks that are used to back up and restore
|
|
||||||
"""
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import (
|
|
||||||
RestoreStrategy,
|
|
||||||
BackupReason,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.models.backup.snapshot import Snapshot
|
|
||||||
from selfprivacy_api.utils.huey import huey
|
|
||||||
from huey import crontab
|
|
||||||
|
|
||||||
from selfprivacy_api.services import get_service_by_id
|
|
||||||
from selfprivacy_api.backup import Backups
|
|
||||||
from selfprivacy_api.backup.jobs import add_autobackup_job
|
|
||||||
from selfprivacy_api.jobs import Jobs, JobStatus, Job
|
|
||||||
|
|
||||||
|
|
||||||
SNAPSHOT_CACHE_TTL_HOURS = 6
|
|
||||||
|
|
||||||
|
|
||||||
def validate_datetime(dt: datetime) -> bool:
    """
    Validates that it is time to back up.
    Also ensures that the timezone-aware time is used.
    """
    # Naive datetimes are interpreted as UTC before the check.
    aware = dt if dt.tzinfo is not None else dt.replace(tzinfo=timezone.utc)
    return Backups.is_time_to_backup(aware)
|
|
||||||
|
|
||||||
|
|
||||||
# huey tasks need to return something
|
|
||||||
# huey tasks need to return something
@huey.task()
def start_backup(service_id: str, reason: BackupReason = BackupReason.EXPLICIT) -> bool:
    """
    The worker task that starts the backup process.

    Raises ValueError when service_id does not resolve to a service.
    """
    service = get_service_by_id(service_id)
    if service is None:
        raise ValueError(f"No such service: {service_id}")
    Backups.back_up(service, reason)
    return True
|
|
||||||
|
|
||||||
|
|
||||||
@huey.task()
def prune_autobackup_snapshots(job: Job) -> bool:
    """
    Remove all autobackup snapshots that do not fit into quotas set

    The job is marked RUNNING, then FINISHED on success or ERROR on failure.
    """
    Jobs.update(job, JobStatus.RUNNING)
    try:
        Backups.prune_all_autosnaps()
    except Exception as e:
        Jobs.update(job, JobStatus.ERROR, error=f"{type(e).__name__}:{e}")
        return False
    Jobs.update(job, JobStatus.FINISHED)
    return True
|
|
||||||
|
|
||||||
|
|
||||||
@huey.task()
def restore_snapshot(
    snapshot: Snapshot,
    strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
) -> bool:
    """
    The worker task that starts the restore process.
    """
    Backups.restore_snapshot(snapshot, strategy)
    # huey tasks need to return something
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def do_autobackup() -> None:
    """
    Body of autobackup task, broken out to test it
    For some reason, we cannot launch periodic huey tasks
    inside tests
    """
    # datetime.utcnow() is deprecated since Python 3.12;
    # datetime.now(timezone.utc) yields the same timezone-aware instant.
    time = datetime.now(timezone.utc)
    services_to_back_up = Backups.services_to_back_up(time)
    if not services_to_back_up:
        return
    job = add_autobackup_job(services_to_back_up)

    # Integer division: total progress may end just short of 100.
    progress_per_service = 100 // len(services_to_back_up)
    progress = 0
    Jobs.update(job, JobStatus.RUNNING, progress=progress)

    for service in services_to_back_up:
        try:
            Backups.back_up(service, BackupReason.AUTO)
        except Exception as error:
            # First failure aborts the whole run and marks the job ERROR.
            Jobs.update(
                job,
                status=JobStatus.ERROR,
                error=type(error).__name__ + ": " + str(error),
            )
            return
        progress = progress + progress_per_service
        Jobs.update(job, JobStatus.RUNNING, progress=progress)

    Jobs.update(job, JobStatus.FINISHED)
|
|
||||||
|
|
||||||
|
|
||||||
# validate_datetime gates execution: the task body runs only when it is
# time to back up.
@huey.periodic_task(validate_datetime=validate_datetime)
def automatic_backup() -> None:
    """
    The worker periodic task that starts the automatic backup process.
    """
    do_autobackup()
|
|
||||||
|
|
||||||
|
|
||||||
@huey.periodic_task(crontab(hour="*/" + str(SNAPSHOT_CACHE_TTL_HOURS)))
def reload_snapshot_cache():
    """Periodically refresh the snapshot cache (every SNAPSHOT_CACHE_TTL_HOURS hours)."""
    Backups.force_snapshot_cache_reload()
|
|
|
@ -1,35 +0,0 @@
|
||||||
import subprocess
|
|
||||||
from os.path import exists
|
|
||||||
from typing import Generator
|
|
||||||
|
|
||||||
|
|
||||||
def output_yielder(command) -> Generator[str, None, None]:
    """Yield the combined stdout/stderr lines of *command* as they appear.

    Lines containing "NOTICE:" are filtered out.
    Note: If you break during iteration, it kills the process
    """
    with subprocess.Popen(
        command,
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    ) as handle:
        if handle is None or handle.stdout is None:
            raise ValueError("could not run command: ", command)

        try:
            while True:
                line = handle.stdout.readline()
                if line == "":
                    break
                if "NOTICE:" not in line:
                    yield line
        except GeneratorExit:
            # Consumer stopped iterating: terminate the child process.
            handle.kill()
|
|
||||||
|
|
||||||
|
|
||||||
def sync(src_path: str, dest_path: str):
    """a wrapper around rclone sync"""

    if not exists(src_path):
        raise ValueError("source dir for rclone sync must exist")

    command = ["rclone", "sync", "-P", src_path, dest_path]
    # Surface the first rclone-reported error as an exception.
    errors = (msg for msg in output_yielder(command) if "ERROR" in msg)
    for message in errors:
        raise ValueError(message)
|
|
|
@ -1,30 +0,0 @@
|
||||||
from fastapi import Depends, HTTPException, status
|
|
||||||
from fastapi.security import APIKeyHeader
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from selfprivacy_api.actions.api_tokens import is_token_valid
|
|
||||||
|
|
||||||
|
|
||||||
class TokenHeader(BaseModel):
    """Validated API token extracted from the Authorization header."""

    token: str
|
|
||||||
|
|
||||||
|
|
||||||
async def get_token_header(
    token: str = Depends(APIKeyHeader(name="Authorization", auto_error=False))
) -> TokenHeader:
    """Extract and validate the bearer token from the Authorization header.

    Responds 401 when the header is missing or the token is invalid.
    """
    if token is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail="Token not provided"
        )
    token = token.replace("Bearer ", "")
    if not is_token_valid(token):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token"
        )
    return TokenHeader(token=token)
|
|
||||||
|
|
||||||
|
|
||||||
def get_api_version() -> str:
    """Get API version"""
    version = "3.2.1"
    return version
|
|
|
@ -1,21 +0,0 @@
|
||||||
"""GraphQL API for SelfPrivacy."""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
from strawberry.permission import BasePermission
|
|
||||||
from strawberry.types import Info
|
|
||||||
|
|
||||||
from selfprivacy_api.actions.api_tokens import is_token_valid
|
|
||||||
|
|
||||||
|
|
||||||
class IsAuthenticated(BasePermission):
    """Is authenticated permission"""

    message = "You must be authenticated to access this resource."

    def has_permission(self, source: typing.Any, info: Info, **kwargs) -> bool:
        """Accept the token from the Authorization header or ?token= param."""
        request = info.context["request"]
        token = request.headers.get("Authorization")
        if token is None:
            token = request.query_params.get("token")
        if token is None:
            return False
        return is_token_valid(token.replace("Bearer ", ""))
|
|
|
@ -1,36 +0,0 @@
|
||||||
"""Backup"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
from enum import Enum
|
|
||||||
import strawberry
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.enum
class RestoreStrategy(Enum):
    """How to apply a snapshot when restoring a service."""

    INPLACE = "INPLACE"
    DOWNLOAD_VERIFY_OVERWRITE = "DOWNLOAD_VERIFY_OVERWRITE"
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.enum
class BackupReason(Enum):
    """Why a snapshot was taken."""

    EXPLICIT = "EXPLICIT"  # user-requested backup
    AUTO = "AUTO"  # scheduled autobackup (used by do_autobackup)
    PRE_RESTORE = "PRE_RESTORE"  # presumably taken just before a restore — confirm
|
|
||||||
|
|
||||||
|
|
||||||
class _AutobackupQuotas(BaseModel):
    """Retention quotas for autobackup snapshots.

    A value of -1 means unlimited (see Storage.autobackup_quotas defaults).
    """

    last: int
    daily: int
    weekly: int
    monthly: int
    yearly: int


# GraphQL output type derived field-for-field from _AutobackupQuotas.
@strawberry.experimental.pydantic.type(model=_AutobackupQuotas, all_fields=True)
class AutobackupQuotas:
    pass


# GraphQL input type derived field-for-field from _AutobackupQuotas.
@strawberry.experimental.pydantic.input(model=_AutobackupQuotas, all_fields=True)
class AutobackupQuotasInput:
    pass
|
|
|
@ -1,15 +0,0 @@
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: use https://strawberry.rocks/docs/integrations/pydantic when it is stable
|
|
||||||
@strawberry.type
class DnsRecord:
    """DNS record"""

    record_type: str
    name: str
    content: str
    ttl: int
    # priority is None for record types that have no priority field
    priority: typing.Optional[int]
    display_name: str
|
|
|
@ -1,51 +0,0 @@
|
||||||
"""Jobs status"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import datetime
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
from selfprivacy_api.jobs import Job, Jobs
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ApiJob:
    """Job type for GraphQL."""

    uid: str
    type_id: str
    name: str
    description: str
    # Name of the JobStatus enum member (see job_to_api_job)
    status: str
    status_text: typing.Optional[str]
    progress: typing.Optional[int]
    created_at: datetime.datetime
    updated_at: datetime.datetime
    finished_at: typing.Optional[datetime.datetime]
    error: typing.Optional[str]
    result: typing.Optional[str]
|
|
||||||
|
|
||||||
|
|
||||||
def job_to_api_job(job: Job) -> ApiJob:
    """Convert a Job from jobs controller to a GraphQL ApiJob."""
    fields = {
        "uid": str(job.uid),
        "type_id": job.type_id,
        "name": job.name,
        "description": job.description,
        "status": job.status.name,
        "status_text": job.status_text,
        "progress": job.progress,
        "created_at": job.created_at,
        "updated_at": job.updated_at,
        "finished_at": job.finished_at,
        "error": job.error,
        "result": job.result,
    }
    return ApiJob(**fields)
|
|
||||||
|
|
||||||
|
|
||||||
def get_api_job_by_id(job_id: str) -> typing.Optional[ApiJob]:
    """Get a job for GraphQL by its ID."""
    job = Jobs.get_job(job_id)
    return None if job is None else job_to_api_job(job)
|
|
|
@ -1,182 +0,0 @@
|
||||||
from enum import Enum
|
|
||||||
from typing import Optional, List
|
|
||||||
import datetime
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import BackupReason
|
|
||||||
from selfprivacy_api.graphql.common_types.dns import DnsRecord
|
|
||||||
|
|
||||||
from selfprivacy_api.services import get_service_by_id, get_services_by_location
|
|
||||||
from selfprivacy_api.services import Service as ServiceInterface
|
|
||||||
from selfprivacy_api.services import ServiceDnsRecord
|
|
||||||
|
|
||||||
from selfprivacy_api.utils.block_devices import BlockDevices
|
|
||||||
from selfprivacy_api.utils.network import get_ip4, get_ip6
|
|
||||||
|
|
||||||
|
|
||||||
def get_usages(root: "StorageVolume") -> list["StorageUsageInterface"]:
    """Get usages of a volume"""
    usages: list["StorageUsageInterface"] = []
    for service in get_services_by_location(root.name):
        usages.append(
            ServiceStorageUsage(
                service=service_to_graphql_service(service),
                title=service.get_display_name(),
                used_space=str(service.get_storage_usage()),
                volume=get_volume_by_id(service.get_drive()),
            )
        )
    return usages
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class StorageVolume:
    """Stats and basic info about a volume or a system disk."""

    # Sizes are stringified values as reported by BlockDevices
    # (see get_volume_by_id); not normalized here.
    total_space: str
    free_space: str
    used_space: str
    # True for the system disk (named "sda1" in get_volume_by_id)
    root: bool
    name: str
    model: Optional[str]
    serial: Optional[str]
    type: str

    @strawberry.field
    def usages(self) -> list["StorageUsageInterface"]:
        """Get usages of a volume"""
        return get_usages(self)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.interface
class StorageUsageInterface:
    """Common shape for anything that occupies space on a volume."""

    used_space: str
    volume: Optional[StorageVolume]
    title: str
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ServiceStorageUsage(StorageUsageInterface):
    """Storage usage for a service"""

    # None when the service could not be resolved (see get_storage_usage)
    service: Optional["Service"]
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.enum
class ServiceStatusEnum(Enum):
    """Service runtime status.

    NOTE(review): values appear to mirror systemd unit states plus OFF —
    confirm against the Service implementation.
    """

    ACTIVE = "ACTIVE"
    RELOADING = "RELOADING"
    INACTIVE = "INACTIVE"
    FAILED = "FAILED"
    ACTIVATING = "ACTIVATING"
    DEACTIVATING = "DEACTIVATING"
    OFF = "OFF"
|
|
||||||
|
|
||||||
|
|
||||||
def get_storage_usage(root: "Service") -> ServiceStorageUsage:
    """Get storage usage for a service"""
    service = get_service_by_id(root.id)
    if service is None:
        # Unknown service: report zero usage on the system disk.
        return ServiceStorageUsage(
            service=None,
            title="Not found",
            used_space="0",
            volume=get_volume_by_id("sda1"),
        )
    return ServiceStorageUsage(
        service=service_to_graphql_service(service),
        title=service.get_display_name(),
        used_space=str(service.get_storage_usage()),
        volume=get_volume_by_id(service.get_drive()),
    )
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: This won't be needed when deriving DnsRecord via strawberry pydantic integration
|
|
||||||
# https://strawberry.rocks/docs/integrations/pydantic
|
|
||||||
# Remove when the link above says it got stable.
|
|
||||||
def service_dns_to_graphql(record: ServiceDnsRecord) -> DnsRecord:
    """Translate a ServiceDnsRecord into the GraphQL DnsRecord type."""
    fields = {
        "record_type": record.type,
        "name": record.name,
        "content": record.content,
        "ttl": record.ttl,
        "priority": record.priority,
        "display_name": record.display_name,
    }
    return DnsRecord(**fields)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Service:
    """GraphQL representation of a service (built by service_to_graphql_service)."""

    id: str
    display_name: str
    description: str
    svg_icon: str
    is_movable: bool
    is_required: bool
    is_enabled: bool
    can_be_backed_up: bool
    backup_description: str
    status: ServiceStatusEnum
    url: Optional[str]

    @strawberry.field
    def dns_records(self) -> Optional[List[DnsRecord]]:
        """DNS records of the service, resolved for the current IPv4/IPv6."""
        service = get_service_by_id(self.id)
        if service is None:
            raise LookupError(f"no service {self.id}. Should be unreachable")

        raw_records = service.get_dns_records(get_ip4(), get_ip6())
        dns_records = [service_dns_to_graphql(record) for record in raw_records]
        return dns_records

    @strawberry.field
    def storage_usage(self) -> ServiceStorageUsage:
        """Get storage usage for a service"""
        return get_storage_usage(self)

    # TODO: fill this
    @strawberry.field
    def backup_snapshots(self) -> Optional[List["SnapshotInfo"]]:
        # Placeholder: always None until implemented (see TODO above).
        return None
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class SnapshotInfo:
    """A backup snapshot of a single service."""

    id: str
    service: Service
    created_at: datetime.datetime
    reason: BackupReason
|
|
||||||
|
|
||||||
|
|
||||||
def service_to_graphql_service(service: ServiceInterface) -> Service:
    """Convert service to graphql service"""
    # Collect the service state via its accessor methods, then build the
    # GraphQL object in one shot.
    attrs = {
        "id": service.get_id(),
        "display_name": service.get_display_name(),
        "description": service.get_description(),
        "svg_icon": service.get_svg_icon(),
        "is_movable": service.is_movable(),
        "is_required": service.is_required(),
        "is_enabled": service.is_enabled(),
        "can_be_backed_up": service.can_be_backed_up(),
        "backup_description": service.get_backup_description(),
        "status": ServiceStatusEnum(service.get_status().value),
        "url": service.get_url(),
    }
    return Service(**attrs)
|
|
||||||
|
|
||||||
|
|
||||||
def get_volume_by_id(volume_id: str) -> Optional[StorageVolume]:
    """Get volume by id"""
    device = BlockDevices().get_block_device(volume_id)
    if device is None:
        return None

    # Prefer the filesystem size; fall back to the raw device size when the
    # filesystem size is not reported.
    if device.fssize is not None:
        total = str(device.fssize)
    else:
        total = str(device.size)

    return StorageVolume(
        total_space=total,
        free_space=str(device.fsavail),
        used_space=str(device.fsused),
        root=device.name == "sda1",
        name=device.name,
        model=device.model,
        serial=device.serial,
        type=device.type,
    )
|
|
|
@ -1,55 +0,0 @@
|
||||||
import typing
|
|
||||||
from enum import Enum
|
|
||||||
import strawberry
|
|
||||||
import selfprivacy_api.actions.users as users_actions
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
MutationReturnInterface,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.enum
class UserType(Enum):
    """Kind of a system account exposed over GraphQL."""

    NORMAL = "NORMAL"
    PRIMARY = "PRIMARY"
    ROOT = "ROOT"
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class User:
    """GraphQL representation of a system user."""

    user_type: UserType
    username: str
    # userHomeFolderspace: UserHomeFolderUsage
    ssh_keys: typing.List[str] = strawberry.field(default_factory=list)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class UserMutationReturn(MutationReturnInterface):
    """Return type for user mutation"""

    # Affected user, or None when the mutation failed.
    user: typing.Optional[User] = None
|
|
||||||
|
|
||||||
|
|
||||||
def get_user_by_username(username: str) -> typing.Optional[User]:
    """Look up one user by name and convert it to the GraphQL User type.

    Returns None when no such user exists.
    """
    found = users_actions.get_user_by_username(username)
    if found is None:
        return None

    return User(
        user_type=UserType(found.origin.value),
        username=found.username,
        ssh_keys=found.ssh_keys,
    )
|
|
||||||
|
|
||||||
|
|
||||||
def get_users() -> typing.List[User]:
    """Get users"""

    def to_graphql(user) -> User:
        # Map one internal user record to the GraphQL User type.
        return User(
            user_type=UserType(user.origin.value),
            username=user.username,
            ssh_keys=user.ssh_keys,
        )

    system_users = users_actions.get_users(exclude_root=True)
    return [to_graphql(user) for user in system_users]
|
|
|
@ -1,219 +0,0 @@
|
||||||
"""API access mutations"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import datetime
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
from strawberry.types import Info
|
|
||||||
from selfprivacy_api.actions.api_tokens import (
|
|
||||||
CannotDeleteCallerException,
|
|
||||||
InvalidExpirationDate,
|
|
||||||
InvalidUsesLeft,
|
|
||||||
NotFoundException,
|
|
||||||
delete_api_token,
|
|
||||||
get_new_api_recovery_key,
|
|
||||||
use_mnemonic_recovery_token,
|
|
||||||
refresh_api_token,
|
|
||||||
delete_new_device_auth_token,
|
|
||||||
get_new_device_auth_token,
|
|
||||||
use_new_device_auth_token,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
GenericMutationReturn,
|
|
||||||
MutationReturnInterface,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ApiKeyMutationReturn(MutationReturnInterface):
    """Mutation return carrying a newly generated API key (None on failure)."""

    key: typing.Optional[str]
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class DeviceApiTokenMutationReturn(MutationReturnInterface):
    """Mutation return carrying a device API token (None on failure)."""

    token: typing.Optional[str]
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class RecoveryKeyLimitsInput:
    """Recovery key limits input"""

    # When set, the key stops working after this moment.
    expiration_date: typing.Optional[datetime.datetime] = None
    # When set, the key can be used at most this many times.
    uses: typing.Optional[int] = None
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class UseRecoveryKeyInput:
    """Use recovery key input"""

    key: str  # mnemonic recovery key
    deviceName: str  # name to register the new device token under
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class UseNewDeviceKeyInput:
    """Use new device key input"""

    key: str  # one-time new-device authorization key
    deviceName: str  # name to register the new device token under
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ApiMutations:
    """Mutations for managing API access: recovery keys and device tokens."""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def get_new_recovery_api_key(
        self, limits: typing.Optional[RecoveryKeyLimitsInput] = None
    ) -> ApiKeyMutationReturn:
        """Generate recovery key"""
        if limits is None:
            limits = RecoveryKeyLimitsInput()
        try:
            key = get_new_api_recovery_key(limits.expiration_date, limits.uses)
        except InvalidExpirationDate:
            return ApiKeyMutationReturn(
                success=False,
                message="Expiration date must be in the future",
                code=400,
                key=None,
            )
        except InvalidUsesLeft:
            return ApiKeyMutationReturn(
                success=False,
                message="Uses must be greater than 0",
                code=400,
                key=None,
            )
        return ApiKeyMutationReturn(
            success=True,
            message="Recovery key generated",
            code=200,
            key=key,
        )

    # Unauthenticated on purpose: the caller has no device token yet.
    @strawberry.mutation()
    def use_recovery_api_key(
        self, input: UseRecoveryKeyInput
    ) -> DeviceApiTokenMutationReturn:
        """Use recovery key"""
        token = use_mnemonic_recovery_token(input.key, input.deviceName)
        if token is not None:
            return DeviceApiTokenMutationReturn(
                success=True,
                message="Recovery key used",
                code=200,
                token=token,
            )
        return DeviceApiTokenMutationReturn(
            success=False,
            message="Recovery key not found",
            code=404,
            token=None,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def refresh_device_api_token(self, info: Info) -> DeviceApiTokenMutationReturn:
        """Refresh device api token"""
        token_string = (
            info.context["request"]
            .headers.get("Authorization", "")
            .replace("Bearer ", "")
        )
        # BUGFIX: headers.get(..., "") can never return None, so the old
        # `if token_string is None` branch was dead code. Treat an empty
        # Authorization header as a missing token instead.
        if not token_string:
            return DeviceApiTokenMutationReturn(
                success=False,
                message="Token not found",
                code=404,
                token=None,
            )

        try:
            new_token = refresh_api_token(token_string)
            return DeviceApiTokenMutationReturn(
                success=True,
                message="Token refreshed",
                code=200,
                token=new_token,
            )
        except NotFoundException:
            return DeviceApiTokenMutationReturn(
                success=False,
                message="Token not found",
                code=404,
                token=None,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def delete_device_api_token(self, device: str, info: Info) -> GenericMutationReturn:
        """Delete device api token"""
        # The caller's own token is needed so the action layer can refuse to
        # delete the token that authorizes this very request.
        self_token = (
            info.context["request"]
            .headers.get("Authorization", "")
            .replace("Bearer ", "")
        )
        try:
            delete_api_token(self_token, device)
        except NotFoundException:
            return GenericMutationReturn(
                success=False,
                message="Token not found",
                code=404,
            )
        except CannotDeleteCallerException:
            return GenericMutationReturn(
                success=False,
                message="Cannot delete caller token",
                code=400,
            )
        except Exception as e:
            # Last-resort catch: report the failure instead of leaking a 500
            # through the GraphQL layer.
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )
        return GenericMutationReturn(
            success=True,
            message="Token deleted",
            code=200,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def get_new_device_api_key(self) -> ApiKeyMutationReturn:
        """Generate device api key"""
        key = get_new_device_auth_token()
        return ApiKeyMutationReturn(
            success=True,
            message="Device api key generated",
            code=200,
            key=key,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def invalidate_new_device_api_key(self) -> GenericMutationReturn:
        """Invalidate new device api key"""
        delete_new_device_auth_token()
        return GenericMutationReturn(
            success=True,
            message="New device key deleted",
            code=200,
        )

    # Unauthenticated on purpose: the new device has no token yet.
    @strawberry.mutation()
    def authorize_with_new_device_api_key(
        self, input: UseNewDeviceKeyInput
    ) -> DeviceApiTokenMutationReturn:
        """Authorize with new device api key"""
        token = use_new_device_auth_token(input.key, input.deviceName)
        if token is None:
            return DeviceApiTokenMutationReturn(
                success=False,
                message="Token not found",
                code=404,
                token=None,
            )
        return DeviceApiTokenMutationReturn(
            success=True,
            message="Token used",
            code=200,
            token=token,
        )
|
|
|
@ -1,241 +0,0 @@
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
from selfprivacy_api.jobs import Jobs
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
GenericMutationReturn,
|
|
||||||
GenericJobMutationReturn,
|
|
||||||
MutationReturnInterface,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.queries.backup import BackupConfiguration
|
|
||||||
from selfprivacy_api.graphql.queries.backup import Backup
|
|
||||||
from selfprivacy_api.graphql.queries.providers import BackupProvider
|
|
||||||
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import (
|
|
||||||
AutobackupQuotasInput,
|
|
||||||
RestoreStrategy,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.backup import Backups
|
|
||||||
from selfprivacy_api.services import get_service_by_id
|
|
||||||
from selfprivacy_api.backup.tasks import (
|
|
||||||
start_backup,
|
|
||||||
restore_snapshot,
|
|
||||||
prune_autobackup_snapshots,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.backup.jobs import add_backup_job, add_restore_job
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class InitializeRepositoryInput:
    """Initialize repository input"""

    provider: BackupProvider
    # The following field may become optional for other providers?
    # Backblaze takes bucket id and name
    location_id: str
    location_name: str
    # Key ID and key for Backblaze
    login: str
    password: str
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class GenericBackupConfigReturn(MutationReturnInterface):
    """Generic backup config return"""

    # Current backup configuration after the mutation, when available.
    configuration: typing.Optional[BackupConfiguration]
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class BackupMutations:
    """Mutations for configuring backups and managing snapshots."""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def initialize_repository(
        self, repository: InitializeRepositoryInput
    ) -> GenericBackupConfigReturn:
        """Initialize a new repository"""
        Backups.set_provider(
            kind=repository.provider,
            login=repository.login,
            key=repository.password,
            location=repository.location_name,
            repo_id=repository.location_id,
        )
        Backups.init_repo()
        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_repository(self) -> GenericBackupConfigReturn:
        """Remove repository"""
        Backups.reset()
        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def set_autobackup_period(
        self, period: typing.Optional[int] = None
    ) -> GenericBackupConfigReturn:
        """Set autobackup period. None is to disable autobackup"""
        # A period of 0 disables automatic backups.
        if period is not None:
            Backups.set_autobackup_period_minutes(period)
        else:
            Backups.set_autobackup_period_minutes(0)

        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def set_autobackup_quotas(
        self, quotas: AutobackupQuotasInput
    ) -> GenericBackupConfigReturn:
        """
        Set autobackup quotas.
        Values <=0 for any timeframe mean no limits for that timeframe.
        To disable autobackup use autobackup period setting, not this mutation.
        """

        job = Jobs.add(
            name="Trimming autobackup snapshots",
            type_id="backups.autobackup_trimming",
            description="Pruning the excessive snapshots after the new autobackup quotas are set",
        )

        try:
            Backups.set_autobackup_quotas(quotas)
            # this task is async and can fail with only a job to report the error
            prune_autobackup_snapshots(job)
            return GenericBackupConfigReturn(
                success=True,
                message="",
                code=200,
                configuration=Backup().configuration(),
            )

        except Exception as e:
            # Surface the failure type and message to the client; the job
            # created above remains the place to inspect async errors.
            return GenericBackupConfigReturn(
                success=False,
                message=type(e).__name__ + ":" + str(e),
                code=400,
                configuration=Backup().configuration(),
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def start_backup(self, service_id: str) -> GenericJobMutationReturn:
        """Start backup"""

        service = get_service_by_id(service_id)
        if service is None:
            # BUGFIX: previously returned 300 (a redirect-class code) here;
            # 404 matches the identical "nonexistent service" error path in
            # restore_backup.
            return GenericJobMutationReturn(
                success=False,
                code=404,
                message=f"nonexistent service: {service_id}",
                job=None,
            )

        # Register the job first so the client can track it, then kick off
        # the async backup task.
        job = add_backup_job(service)
        start_backup(service_id)

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Backup job queued",
            job=job_to_api_job(job),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def restore_backup(
        self,
        snapshot_id: str,
        strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
    ) -> GenericJobMutationReturn:
        """Restore backup"""
        snap = Backups.get_snapshot_by_id(snapshot_id)
        if snap is None:
            return GenericJobMutationReturn(
                success=False,
                code=404,
                message=f"No such snapshot: {snapshot_id}",
                job=None,
            )

        service = get_service_by_id(snap.service_name)
        if service is None:
            return GenericJobMutationReturn(
                success=False,
                code=404,
                message=f"nonexistent service: {snap.service_name}",
                job=None,
            )

        try:
            job = add_restore_job(snap)
        except ValueError as error:
            return GenericJobMutationReturn(
                success=False,
                code=400,
                message=str(error),
                job=None,
            )

        # Async task; progress and errors are reported through the job.
        restore_snapshot(snap, strategy)

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="restore job created",
            job=job_to_api_job(job),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def forget_snapshot(self, snapshot_id: str) -> GenericMutationReturn:
        """Forget a snapshot.
        Makes it inaccessible from the server.
        After some time, the data (encrypted) will not be recoverable
        from the backup server too, but not immediately"""

        snap = Backups.get_snapshot_by_id(snapshot_id)
        if snap is None:
            return GenericMutationReturn(
                success=False,
                code=404,
                message=f"snapshot {snapshot_id} not found",
            )

        try:
            Backups.forget_snapshot(snap)
            return GenericMutationReturn(
                success=True,
                code=200,
                message="",
            )
        except Exception as error:
            return GenericMutationReturn(
                success=False,
                code=400,
                message=str(error),
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def force_snapshots_reload(self) -> GenericMutationReturn:
        """Force snapshots reload"""
        Backups.force_snapshot_cache_reload()
        return GenericMutationReturn(
            success=True,
            code=200,
            message="",
        )
|
|
|
@ -1,216 +0,0 @@
|
||||||
"""Deprecated mutations
|
|
||||||
|
|
||||||
There was made a mistake, where mutations were not grouped, and were instead
|
|
||||||
placed in the root of mutations schema. In this file, we import all the
|
|
||||||
mutations from and provide them to the root for backwards compatibility.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.graphql.common_types.user import UserMutationReturn
|
|
||||||
from selfprivacy_api.graphql.mutations.api_mutations import (
|
|
||||||
ApiKeyMutationReturn,
|
|
||||||
ApiMutations,
|
|
||||||
DeviceApiTokenMutationReturn,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
GenericJobMutationReturn,
|
|
||||||
GenericMutationReturn,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.mutations.services_mutations import (
|
|
||||||
ServiceJobMutationReturn,
|
|
||||||
ServiceMutationReturn,
|
|
||||||
ServicesMutations,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.system_mutations import (
|
|
||||||
AutoUpgradeSettingsMutationReturn,
|
|
||||||
SystemMutations,
|
|
||||||
TimezoneMutationReturn,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations
|
|
||||||
|
|
||||||
|
|
||||||
def deprecated_mutation(func, group, auth=True):
    """Wrap *func* as a root-level mutation marked deprecated in favour of
    its grouped counterpart `group.func_name`."""
    permissions = [IsAuthenticated] if auth else []
    reason = f"Use `{group}.{func.__name__}` instead"
    return strawberry.mutation(
        resolver=func,
        permission_classes=permissions,
        deprecation_reason=reason,
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class DeprecatedApiMutations:
    """Root-level aliases of ApiMutations, kept for backwards compatibility."""

    get_new_recovery_api_key: ApiKeyMutationReturn = deprecated_mutation(
        ApiMutations.get_new_recovery_api_key,
        "api",
    )

    use_recovery_api_key: DeviceApiTokenMutationReturn = deprecated_mutation(
        ApiMutations.use_recovery_api_key,
        "api",
        auth=False,
    )

    refresh_device_api_token: DeviceApiTokenMutationReturn = deprecated_mutation(
        ApiMutations.refresh_device_api_token,
        "api",
    )

    delete_device_api_token: GenericMutationReturn = deprecated_mutation(
        ApiMutations.delete_device_api_token,
        "api",
    )

    get_new_device_api_key: ApiKeyMutationReturn = deprecated_mutation(
        ApiMutations.get_new_device_api_key,
        "api",
    )

    invalidate_new_device_api_key: GenericMutationReturn = deprecated_mutation(
        ApiMutations.invalidate_new_device_api_key,
        "api",
    )

    authorize_with_new_device_api_key: DeviceApiTokenMutationReturn = (
        deprecated_mutation(
            ApiMutations.authorize_with_new_device_api_key,
            "api",
            auth=False,
        )
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class DeprecatedSystemMutations:
    """Root-level aliases of SystemMutations, kept for backwards compatibility."""

    change_timezone: TimezoneMutationReturn = deprecated_mutation(
        SystemMutations.change_timezone,
        "system",
    )

    change_auto_upgrade_settings: AutoUpgradeSettingsMutationReturn = (
        deprecated_mutation(
            SystemMutations.change_auto_upgrade_settings,
            "system",
        )
    )

    run_system_rebuild: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_rebuild,
        "system",
    )

    run_system_rollback: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_rollback,
        "system",
    )

    run_system_upgrade: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_upgrade,
        "system",
    )

    reboot_system: GenericMutationReturn = deprecated_mutation(
        SystemMutations.reboot_system,
        "system",
    )

    pull_repository_changes: GenericMutationReturn = deprecated_mutation(
        SystemMutations.pull_repository_changes,
        "system",
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class DeprecatedUsersMutations:
    """Root-level aliases of UsersMutations, kept for backwards compatibility."""

    create_user: UserMutationReturn = deprecated_mutation(
        UsersMutations.create_user,
        "users",
    )

    delete_user: GenericMutationReturn = deprecated_mutation(
        UsersMutations.delete_user,
        "users",
    )

    update_user: UserMutationReturn = deprecated_mutation(
        UsersMutations.update_user,
        "users",
    )

    add_ssh_key: UserMutationReturn = deprecated_mutation(
        UsersMutations.add_ssh_key,
        "users",
    )

    remove_ssh_key: UserMutationReturn = deprecated_mutation(
        UsersMutations.remove_ssh_key,
        "users",
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class DeprecatedStorageMutations:
    """Root-level aliases of StorageMutations, kept for backwards compatibility."""

    resize_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.resize_volume,
        "storage",
    )

    mount_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.mount_volume,
        "storage",
    )

    unmount_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.unmount_volume,
        "storage",
    )

    migrate_to_binds: GenericJobMutationReturn = deprecated_mutation(
        StorageMutations.migrate_to_binds,
        "storage",
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class DeprecatedServicesMutations:
    """Root-level aliases of ServicesMutations, kept for backwards compatibility."""

    enable_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.enable_service,
        "services",
    )

    disable_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.disable_service,
        "services",
    )

    stop_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.stop_service,
        "services",
    )

    start_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.start_service,
        "services",
    )

    restart_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.restart_service,
        "services",
    )

    move_service: ServiceJobMutationReturn = deprecated_mutation(
        ServicesMutations.move_service,
        "services",
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class DeprecatedJobMutations:
    """Root-level aliases of JobMutations, kept for backwards compatibility."""

    remove_job: GenericMutationReturn = deprecated_mutation(
        JobMutations.remove_job,
        "jobs",
    )
|
|
|
@ -1,28 +0,0 @@
|
||||||
"""Manipulate jobs"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import GenericMutationReturn
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.jobs import Jobs
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class JobMutations:
    """Mutations related to jobs"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_job(self, job_id: str) -> GenericMutationReturn:
        """Remove a job from the queue"""
        removed = Jobs.remove_by_uid(job_id)
        if not removed:
            return GenericMutationReturn(
                success=False,
                code=404,
                message="Job not found",
            )
        return GenericMutationReturn(
            success=True,
            code=200,
            message="Job removed",
        )
|
|
|
@ -1,21 +0,0 @@
|
||||||
import strawberry
|
|
||||||
import typing
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.jobs import ApiJob
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.interface
class MutationReturnInterface:
    """Common envelope for all mutation results."""

    success: bool  # whether the mutation succeeded
    message: str  # human-readable outcome description
    code: int  # HTTP-like status code
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class GenericMutationReturn(MutationReturnInterface):
    """Concrete mutation return with no extra payload."""

    pass
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class GenericJobMutationReturn(MutationReturnInterface):
    """Mutation return carrying the job created by the mutation, if any."""

    job: typing.Optional[ApiJob] = None
|
|
|
@ -1,217 +0,0 @@
|
||||||
"""Services mutations"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
|
|
||||||
from selfprivacy_api.jobs import JobStatus
|
|
||||||
|
|
||||||
from traceback import format_tb as format_traceback
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
GenericJobMutationReturn,
|
|
||||||
GenericMutationReturn,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.common_types.service import (
|
|
||||||
Service,
|
|
||||||
service_to_graphql_service,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.actions.services import (
|
|
||||||
move_service,
|
|
||||||
ServiceNotFoundError,
|
|
||||||
VolumeNotFoundError,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.services import get_service_by_id
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ServiceMutationReturn(GenericMutationReturn):
    """Service mutation return type."""

    # Affected service, or None when the mutation failed.
    service: typing.Optional[Service] = None
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class MoveServiceInput:
    """Move service input type."""

    service_id: str  # service to move
    location: str  # target volume name
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ServiceJobMutationReturn(GenericJobMutationReturn):
    """Service job mutation return type."""

    # Affected service, or None when the mutation failed.
    service: typing.Optional[Service] = None
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class ServicesMutations:
|
|
||||||
"""Services mutations."""
|
|
||||||
|
|
||||||
@strawberry.mutation(permission_classes=[IsAuthenticated])
|
|
||||||
def enable_service(self, service_id: str) -> ServiceMutationReturn:
|
|
||||||
"""Enable service."""
|
|
||||||
try:
|
|
||||||
service = get_service_by_id(service_id)
|
|
||||||
if service is None:
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=False,
|
|
||||||
message="Service not found.",
|
|
||||||
code=404,
|
|
||||||
)
|
|
||||||
service.enable()
|
|
||||||
except Exception as e:
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=False,
|
|
||||||
message=pretty_error(e),
|
|
||||||
code=400,
|
|
||||||
)
|
|
||||||
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=True,
|
|
||||||
message="Service enabled.",
|
|
||||||
code=200,
|
|
||||||
service=service_to_graphql_service(service),
|
|
||||||
)
|
|
||||||
|
|
||||||
@strawberry.mutation(permission_classes=[IsAuthenticated])
|
|
||||||
def disable_service(self, service_id: str) -> ServiceMutationReturn:
|
|
||||||
"""Disable service."""
|
|
||||||
try:
|
|
||||||
service = get_service_by_id(service_id)
|
|
||||||
if service is None:
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=False,
|
|
||||||
message="Service not found.",
|
|
||||||
code=404,
|
|
||||||
)
|
|
||||||
service.disable()
|
|
||||||
except Exception as e:
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=False,
|
|
||||||
message=pretty_error(e),
|
|
||||||
code=400,
|
|
||||||
)
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=True,
|
|
||||||
message="Service disabled.",
|
|
||||||
code=200,
|
|
||||||
service=service_to_graphql_service(service),
|
|
||||||
)
|
|
||||||
|
|
||||||
@strawberry.mutation(permission_classes=[IsAuthenticated])
|
|
||||||
def stop_service(self, service_id: str) -> ServiceMutationReturn:
|
|
||||||
"""Stop service."""
|
|
||||||
service = get_service_by_id(service_id)
|
|
||||||
if service is None:
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=False,
|
|
||||||
message="Service not found.",
|
|
||||||
code=404,
|
|
||||||
)
|
|
||||||
service.stop()
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=True,
|
|
||||||
message="Service stopped.",
|
|
||||||
code=200,
|
|
||||||
service=service_to_graphql_service(service),
|
|
||||||
)
|
|
||||||
|
|
||||||
@strawberry.mutation(permission_classes=[IsAuthenticated])
|
|
||||||
def start_service(self, service_id: str) -> ServiceMutationReturn:
|
|
||||||
"""Start service."""
|
|
||||||
service = get_service_by_id(service_id)
|
|
||||||
if service is None:
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=False,
|
|
||||||
message="Service not found.",
|
|
||||||
code=404,
|
|
||||||
)
|
|
||||||
service.start()
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=True,
|
|
||||||
message="Service started.",
|
|
||||||
code=200,
|
|
||||||
service=service_to_graphql_service(service),
|
|
||||||
)
|
|
||||||
|
|
||||||
@strawberry.mutation(permission_classes=[IsAuthenticated])
|
|
||||||
def restart_service(self, service_id: str) -> ServiceMutationReturn:
|
|
||||||
"""Restart service."""
|
|
||||||
service = get_service_by_id(service_id)
|
|
||||||
if service is None:
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=False,
|
|
||||||
message="Service not found.",
|
|
||||||
code=404,
|
|
||||||
)
|
|
||||||
service.restart()
|
|
||||||
return ServiceMutationReturn(
|
|
||||||
success=True,
|
|
||||||
message="Service restarted.",
|
|
||||||
code=200,
|
|
||||||
service=service_to_graphql_service(service),
|
|
||||||
)
|
|
||||||
|
|
||||||
    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def move_service(self, input: MoveServiceInput) -> ServiceJobMutationReturn:
        """Move service."""
        # We need a service instance for a reply later
        service = get_service_by_id(input.service_id)
        if service is None:
            return ServiceJobMutationReturn(
                success=False,
                message=f"Service does not exist: {input.service_id}",
                code=404,
            )

        try:
            # NOTE: inside a method body the bare name `move_service` resolves
            # to the module-level helper, not to this method.
            job = move_service(input.service_id, input.location)

        except (ServiceNotFoundError, VolumeNotFoundError) as e:
            # Known "does not exist" failures map to 404.
            return ServiceJobMutationReturn(
                success=False,
                message=pretty_error(e),
                code=404,
            )
        except Exception as e:
            # Any other failure is reported as a 400 with a formatted traceback.
            return ServiceJobMutationReturn(
                success=False,
                message=pretty_error(e),
                code=400,
                service=service_to_graphql_service(service),
            )

        if job.status in [JobStatus.CREATED, JobStatus.RUNNING]:
            # The move runs asynchronously; hand the job back for polling.
            return ServiceJobMutationReturn(
                success=True,
                message="Started moving the service.",
                code=200,
                service=service_to_graphql_service(service),
                job=job_to_api_job(job),
            )
        elif job.status == JobStatus.FINISHED:
            # The move completed synchronously.
            return ServiceJobMutationReturn(
                success=True,
                message="Service moved.",
                code=200,
                service=service_to_graphql_service(service),
                job=job_to_api_job(job),
            )
        else:
            # The job already failed (or was cancelled) by the time we checked.
            return ServiceJobMutationReturn(
                success=False,
                message=f"While moving service and performing the step '{job.status_text}', error occured: {job.error}",
                code=400,
                service=service_to_graphql_service(service),
                job=job_to_api_job(job),
            )
|
|
||||||
|
|
||||||
|
|
||||||
def pretty_error(e: Exception) -> str:
    """Format *e* as ``TypeName: message: traceback`` for client-facing replies.

    Traceback frames are joined with newlines.  The original implementation
    joined them with the literal two-character string ``"/r"`` — an obvious
    typo for a line separator, which produced an unreadable one-line blob.
    """
    traceback = "\n".join(format_traceback(e.__traceback__))
    return type(e).__name__ + ": " + str(e) + ": " + traceback
|
|
|
@ -1,102 +0,0 @@
|
||||||
"""Storage devices mutations"""
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
|
|
||||||
from selfprivacy_api.utils.block_devices import BlockDevices
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
GenericJobMutationReturn,
|
|
||||||
GenericMutationReturn,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.jobs.migrate_to_binds import (
|
|
||||||
BindMigrationConfig,
|
|
||||||
is_bind_migrated,
|
|
||||||
start_bind_migration,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class MigrateToBindsInput:
    """Migrate to binds input"""

    # One block-device name per migratable service; the migration moves each
    # service's data onto its device and bind-mounts it back into place.
    email_block_device: str
    bitwarden_block_device: str
    gitea_block_device: str
    nextcloud_block_device: str
    pleroma_block_device: str
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class StorageMutations:
    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def resize_volume(self, name: str) -> GenericMutationReturn:
        """Resize volume"""
        # 404 when no block device matches the given name.
        device = BlockDevices().get_block_device(name)
        if device is None:
            return GenericMutationReturn(
                success=False, code=404, message="Volume not found"
            )
        device.resize()
        return GenericMutationReturn(
            success=True, code=200, message="Volume resize started"
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def mount_volume(self, name: str) -> GenericMutationReturn:
        """Mount volume"""
        device = BlockDevices().get_block_device(name)
        if device is None:
            return GenericMutationReturn(
                success=False, code=404, message="Volume not found"
            )
        mounted = device.mount()
        if not mounted:
            # mount() returning False is treated as "already mounted".
            return GenericMutationReturn(
                success=False, code=409, message="Volume not mounted (already mounted?)"
            )
        return GenericMutationReturn(
            success=True,
            code=200,
            message="Volume mounted, rebuild the system to apply changes",
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def unmount_volume(self, name: str) -> GenericMutationReturn:
        """Unmount volume"""
        device = BlockDevices().get_block_device(name)
        if device is None:
            return GenericMutationReturn(
                success=False, code=404, message="Volume not found"
            )
        unmounted = device.unmount()
        if not unmounted:
            # unmount() returning False is treated as "already unmounted".
            return GenericMutationReturn(
                success=False, code=409, message="Volume not unmounted (already unmounted?)"
            )
        return GenericMutationReturn(
            success=True,
            code=200,
            message="Volume unmounted, rebuild the system to apply changes",
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def migrate_to_binds(self, input: MigrateToBindsInput) -> GenericJobMutationReturn:
        """Migrate to binds"""
        # The migration is one-way; refuse to run it twice.
        if is_bind_migrated():
            return GenericJobMutationReturn(
                success=False, code=409, message="Already migrated to binds"
            )
        config = BindMigrationConfig(
            email_block_device=input.email_block_device,
            bitwarden_block_device=input.bitwarden_block_device,
            gitea_block_device=input.gitea_block_device,
            nextcloud_block_device=input.nextcloud_block_device,
            pleroma_block_device=input.pleroma_block_device,
        )
        job = start_bind_migration(config)
        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Migration to binds started, rebuild the system to apply changes",
            job=job_to_api_job(job),
        )
|
|
|
@ -1,211 +0,0 @@
|
||||||
"""System management mutations"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
GenericJobMutationReturn,
|
|
||||||
GenericMutationReturn,
|
|
||||||
MutationReturnInterface,
|
|
||||||
GenericJobMutationReturn,
|
|
||||||
)
|
|
||||||
|
|
||||||
import selfprivacy_api.actions.system as system_actions
|
|
||||||
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
|
|
||||||
from selfprivacy_api.jobs.nix_collect_garbage import start_nix_collect_garbage
|
|
||||||
import selfprivacy_api.actions.ssh as ssh_actions
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class TimezoneMutationReturn(MutationReturnInterface):
    """Return type of the timezone mutation, contains timezone"""

    # None when the mutation failed (e.g. invalid tz database name).
    timezone: typing.Optional[str]
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class AutoUpgradeSettingsMutationReturn(MutationReturnInterface):
    """Return type autoUpgrade Settings"""

    # NOTE(review): camelCase (not snake_case) is kept deliberately —
    # presumably it matches the stored settings schema; verify before renaming.
    enableAutoUpgrade: bool
    allowReboot: bool
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class SSHSettingsMutationReturn(MutationReturnInterface):
    """A return type for after changing SSH settings"""

    # Whether the SSH daemon is enabled at all.
    enable: bool
    # Whether password-based logins are accepted.
    password_authentication: bool
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class SSHSettingsInput:
    """Input type for SSH settings"""

    # Both fields are required: the mutation always sets the full SSH state.
    enable: bool
    password_authentication: bool
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class AutoUpgradeSettingsInput:
    """Input type for auto upgrade settings"""

    # None means "leave this setting unchanged".
    enableAutoUpgrade: typing.Optional[bool] = None
    allowReboot: typing.Optional[bool] = None
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class SystemMutations:
    """Mutations related to system settings"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def change_timezone(self, timezone: str) -> TimezoneMutationReturn:
        """Change the timezone of the server. Timezone is a tzdatabase name."""
        try:
            system_actions.change_timezone(timezone)
        except system_actions.InvalidTimezone as e:
            # Unknown tz database name: report 400 without touching settings.
            return TimezoneMutationReturn(
                success=False,
                message=str(e),
                code=400,
                timezone=None,
            )
        return TimezoneMutationReturn(
            success=True,
            message="Timezone changed",
            code=200,
            timezone=timezone,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def change_auto_upgrade_settings(
        self, settings: AutoUpgradeSettingsInput
    ) -> AutoUpgradeSettingsMutationReturn:
        """Change auto upgrade settings of the server."""
        system_actions.set_auto_upgrade_settings(
            settings.enableAutoUpgrade, settings.allowReboot
        )

        # Re-read the stored settings so the reply reflects what was persisted
        # (inputs may be None, meaning "unchanged").
        new_settings = system_actions.get_auto_upgrade_settings()

        return AutoUpgradeSettingsMutationReturn(
            success=True,
            message="Auto-upgrade settings changed",
            code=200,
            enableAutoUpgrade=new_settings.enable,
            allowReboot=new_settings.allowReboot,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def change_ssh_settings(
        self, settings: SSHSettingsInput
    ) -> SSHSettingsMutationReturn:
        """Change ssh settings of the server."""
        ssh_actions.set_ssh_settings(
            enable=settings.enable,
            password_authentication=settings.password_authentication,
        )

        # Echo back the persisted state rather than the raw input.
        new_settings = ssh_actions.get_ssh_settings()

        return SSHSettingsMutationReturn(
            success=True,
            message="SSH settings changed",
            code=200,
            enable=new_settings.enable,
            password_authentication=new_settings.passwordAuthentication,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def run_system_rebuild(self) -> GenericJobMutationReturn:
        # Kicks off a NixOS rebuild as a background job; shell failures
        # are reported as a 500 reply instead of an unhandled exception.
        try:
            job = system_actions.rebuild_system()
            return GenericJobMutationReturn(
                success=True,
                message="Starting system rebuild",
                code=200,
                job=job_to_api_job(job),
            )
        except system_actions.ShellException as e:
            return GenericJobMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def run_system_rollback(self) -> GenericMutationReturn:
        # BUGFIX: rollback_system() used to be called *before* the try block,
        # so a ShellException escaped instead of producing the 500 reply below.
        try:
            system_actions.rollback_system()
            return GenericMutationReturn(
                success=True,
                message="Starting system rollback",
                code=200,
            )
        except system_actions.ShellException as e:
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def run_system_upgrade(self) -> GenericJobMutationReturn:
        # Same pattern as run_system_rebuild, but for an upgrade job.
        try:
            job = system_actions.upgrade_system()
            return GenericJobMutationReturn(
                success=True,
                message="Starting system upgrade",
                code=200,
                job=job_to_api_job(job),
            )
        except system_actions.ShellException as e:
            return GenericJobMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def reboot_system(self) -> GenericMutationReturn:
        # BUGFIX: reboot_system() used to be called *before* the try block,
        # so a ShellException escaped instead of producing the 500 reply below.
        try:
            system_actions.reboot_system()
            return GenericMutationReturn(
                success=True,
                message="System reboot has started",
                code=200,
            )
        except system_actions.ShellException as e:
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def pull_repository_changes(self) -> GenericMutationReturn:
        # Pull the config repository; a zero status from the underlying
        # shell call means success.
        result = system_actions.pull_repository_changes()
        if result.status == 0:
            return GenericMutationReturn(
                success=True,
                message="Repository changes pulled",
                code=200,
            )
        return GenericMutationReturn(
            success=False,
            message=f"Failed to pull repository changes:\n{result.data}",
            code=500,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def nix_collect_garbage(self) -> GenericJobMutationReturn:
        # Garbage collection runs as a background job; the reply only
        # confirms that it has been started.
        job = start_nix_collect_garbage()

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Garbage collector started...",
            job=job_to_api_job(job),
        )
|
|
|
@ -1,209 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Users management module"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.actions.users import UserNotFound
|
|
||||||
from selfprivacy_api.graphql.common_types.user import (
|
|
||||||
UserMutationReturn,
|
|
||||||
get_user_by_username,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.actions.ssh import (
|
|
||||||
InvalidPublicKey,
|
|
||||||
KeyAlreadyExists,
|
|
||||||
KeyNotFound,
|
|
||||||
create_ssh_key,
|
|
||||||
remove_ssh_key,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import (
|
|
||||||
GenericMutationReturn,
|
|
||||||
)
|
|
||||||
import selfprivacy_api.actions.users as users_actions
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class UserMutationInput:
    """Input type for user mutation"""

    # Used by both create_user and update_user; the password is the
    # plaintext credential to set for the account.
    username: str
    password: str
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.input
class SshMutationInput:
    """Input type for ssh mutation"""

    # ssh_key is the full public key line to add to / remove from the user.
    username: str
    ssh_key: str
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class UsersMutations:
    """Mutations change user settings"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def create_user(self, user: UserMutationInput) -> UserMutationReturn:
        # Validation failures map to 400; conflicts map to 409.
        try:
            users_actions.create_user(user.username, user.password)
        except (
            users_actions.PasswordIsEmpty,
            users_actions.UsernameNotAlphanumeric,
            users_actions.UsernameTooLong,
            users_actions.InvalidConfiguration,
        ) as error:
            return UserMutationReturn(success=False, message=str(error), code=400)
        except users_actions.UsernameForbidden as error:
            return UserMutationReturn(success=False, message=str(error), code=409)
        except users_actions.UserAlreadyExists as error:
            # The user record exists, so include it in the conflict reply.
            return UserMutationReturn(
                success=False,
                message=str(error),
                code=409,
                user=get_user_by_username(user.username),
            )

        return UserMutationReturn(
            success=True,
            message="User created",
            code=201,
            user=get_user_by_username(user.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def delete_user(self, username: str) -> GenericMutationReturn:
        # Protected (system) users cannot be deleted.
        try:
            users_actions.delete_user(username)
        except users_actions.UserNotFound as error:
            return GenericMutationReturn(success=False, message=str(error), code=404)
        except users_actions.UserIsProtected as error:
            return GenericMutationReturn(success=False, message=str(error), code=400)

        return GenericMutationReturn(
            success=True,
            message="User deleted",
            code=200,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def update_user(self, user: UserMutationInput) -> UserMutationReturn:
        """Update user mutation"""
        try:
            users_actions.update_user(user.username, user.password)
        except users_actions.PasswordIsEmpty as error:
            return UserMutationReturn(success=False, message=str(error), code=400)
        except users_actions.UserNotFound as error:
            return UserMutationReturn(success=False, message=str(error), code=404)

        return UserMutationReturn(
            success=True,
            message="User updated",
            code=200,
            user=get_user_by_username(user.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def add_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Add a new ssh key"""
        try:
            create_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyAlreadyExists:
            return UserMutationReturn(
                success=False, message="Key already exists", code=409
            )
        except InvalidPublicKey:
            return UserMutationReturn(
                success=False,
                message="Invalid key type. Only ssh-ed25519, ssh-rsa and ecdsa are supported",
                code=400,
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False, message="User not found", code=404
            )
        except Exception as error:  # unexpected failure: report as 500
            return UserMutationReturn(success=False, message=str(error), code=500)

        return UserMutationReturn(
            success=True,
            message="New SSH key successfully written",
            code=201,
            user=get_user_by_username(ssh_input.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Remove ssh key from user"""
        try:
            remove_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyNotFound:
            return UserMutationReturn(
                success=False, message="Key not found", code=404
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False, message="User not found", code=404
            )
        except Exception as error:  # unexpected failure: report as 500
            return UserMutationReturn(success=False, message=str(error), code=500)

        return UserMutationReturn(
            success=True,
            message="SSH key successfully removed",
            code=200,
            user=get_user_by_username(ssh_input.username),
        )
|
|
|
@ -1,83 +0,0 @@
|
||||||
"""API access status"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import datetime
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
from strawberry.types import Info
|
|
||||||
from selfprivacy_api.actions.api_tokens import (
|
|
||||||
get_api_tokens_with_caller_flag,
|
|
||||||
get_api_recovery_token_status,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.dependencies import get_api_version as get_api_version_dependency
|
|
||||||
|
|
||||||
|
|
||||||
def get_api_version() -> str:
    """Get API version"""
    # Thin resolver: delegate to the aliased dependency helper.
    version = get_api_version_dependency()
    return version
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ApiDevice:
    """A single device with SelfPrivacy app installed"""

    name: str
    # When the device's API token was created.
    # NOTE(review): timezone-awareness depends on the token store — confirm.
    creation_date: datetime.datetime
    # True when this device issued the current request.
    is_caller: bool
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class ApiRecoveryKeyStatus:
    """Recovery key status"""

    exists: bool
    valid: bool
    # All optional fields are None when no recovery key exists.
    creation_date: typing.Optional[datetime.datetime]
    expiration_date: typing.Optional[datetime.datetime]
    uses_left: typing.Optional[int]
|
|
||||||
|
|
||||||
|
|
||||||
def get_recovery_key_status() -> ApiRecoveryKeyStatus:
    """Get recovery key status, times are timezone-aware"""
    token_status = get_api_recovery_token_status()
    # Treat "no status object" the same as "key does not exist".
    if token_status is None or not token_status.exists:
        return ApiRecoveryKeyStatus(
            exists=False,
            valid=False,
            creation_date=None,
            expiration_date=None,
            uses_left=None,
        )
    return ApiRecoveryKeyStatus(
        exists=True,
        valid=token_status.valid,
        creation_date=token_status.date,
        expiration_date=token_status.expiration,
        uses_left=token_status.uses_left,
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Api:
    """API access status"""

    version: str = strawberry.field(resolver=get_api_version)

    @strawberry.field(permission_classes=[IsAuthenticated])
    def devices(self, info: Info) -> typing.List[ApiDevice]:
        # The caller's token is the bearer value of the Authorization header;
        # it is used to flag which returned device made this request.
        auth_header = info.context["request"].headers.get("Authorization", "")
        caller_token = auth_header.replace("Bearer ", "")
        return [
            ApiDevice(
                name=device.name,
                creation_date=device.date,
                is_caller=device.is_caller,
            )
            for device in get_api_tokens_with_caller_flag(caller_token)
        ]

    recovery_key: ApiRecoveryKeyStatus = strawberry.field(
        resolver=get_recovery_key_status, permission_classes=[IsAuthenticated]
    )
|
|
|
@ -1,95 +0,0 @@
|
||||||
"""Backup"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
|
|
||||||
from selfprivacy_api.backup import Backups
|
|
||||||
from selfprivacy_api.backup.local_secret import LocalBackupSecret
|
|
||||||
from selfprivacy_api.graphql.queries.providers import BackupProvider
|
|
||||||
from selfprivacy_api.graphql.common_types.service import (
|
|
||||||
Service,
|
|
||||||
ServiceStatusEnum,
|
|
||||||
SnapshotInfo,
|
|
||||||
service_to_graphql_service,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import AutobackupQuotas
|
|
||||||
from selfprivacy_api.services import get_service_by_id
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class BackupConfiguration:
    provider: BackupProvider
    # When server is lost, the app should have the key to decrypt backups
    # on a new server
    encryption_key: str
    # False when repo is not initialized and not ready to be used
    is_initialized: bool
    # If none, autobackups are disabled
    autobackup_period: typing.Optional[int]
    # None is equal to all quotas being unlimited (-1). Optional for compatibility reasons.
    autobackup_quotas: AutobackupQuotas
    # Bucket name for Backblaze, path for some other providers
    location_name: typing.Optional[str]
    location_id: typing.Optional[str]
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: Ideally this should not be done in API but making an internal Service requires more work
|
|
||||||
# than to make an API record about a service
|
|
||||||
def tombstone_service(service_id: str) -> Service:
    """Build a disabled, immovable placeholder record for an orphaned service.

    Used when a snapshot references a service that is no longer installed.
    """
    return Service(
        id=service_id,
        display_name=f"{service_id} (Orphaned)",
        description="",
        svg_icon="",
        url=None,
        status=ServiceStatusEnum.OFF,
        is_enabled=False,
        is_movable=False,
        is_required=False,
        can_be_backed_up=False,
        backup_description="",
    )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Backup:
    @strawberry.field
    def configuration(self) -> BackupConfiguration:
        # Snapshot of the current provider and autobackup settings.
        return BackupConfiguration(
            provider=Backups.provider().name,
            encryption_key=LocalBackupSecret.get(),
            is_initialized=Backups.is_initted(),
            autobackup_period=Backups.autobackup_period_minutes(),
            autobackup_quotas=Backups.autobackup_quotas(),
            location_name=Backups.provider().location,
            location_id=Backups.provider().repo_id,
        )

    @strawberry.field
    def all_snapshots(self) -> typing.List[SnapshotInfo]:
        # Without an initialized repo there is nothing to list.
        if not Backups.is_initted():
            return []
        snapshots = []
        for snap in Backups.get_all_snapshots():
            service = get_service_by_id(snap.service_name)
            if service is None:
                # The service was uninstalled; show a placeholder record.
                api_service = tombstone_service(snap.service_name)
            else:
                api_service = service_to_graphql_service(service)
            if api_service is None:
                raise NotImplementedError(
                    f"Could not construct API Service record for:{snap.service_name}. This should be unreachable and is a bug if you see it."
                )
            snapshots.append(
                SnapshotInfo(
                    id=snap.id,
                    service=api_service,
                    created_at=snap.created_at,
                    reason=snap.reason,
                )
            )
        return snapshots
|
|
|
@ -1,30 +0,0 @@
|
||||||
"""Common types and enums used by different types of queries."""
|
|
||||||
from enum import Enum
|
|
||||||
import datetime
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.enum
class Severity(Enum):
    """
    Severity of an alert.
    """

    # Values mirror the member names so serialized alerts are human-readable.
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
    CRITICAL = "CRITICAL"
    SUCCESS = "SUCCESS"
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Alert:
    """
    Alert type.
    """

    severity: Severity
    title: str
    message: str
    # None when the alert has no associated point in time.
    timestamp: typing.Optional[datetime.datetime]
|
|
|
@ -1,24 +0,0 @@
|
||||||
"""Jobs status"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql.common_types.jobs import (
|
|
||||||
ApiJob,
|
|
||||||
get_api_job_by_id,
|
|
||||||
job_to_api_job,
|
|
||||||
)
|
|
||||||
|
|
||||||
from selfprivacy_api.jobs import Jobs
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Job:
    @strawberry.field
    def get_jobs(self) -> typing.List[ApiJob]:
        # Map every known job to its API representation.
        # BUGFIX: the original called Jobs.get_jobs() twice, discarding the
        # first result; a single call is sufficient.
        return [job_to_api_job(job) for job in Jobs.get_jobs()]

    @strawberry.field
    def get_job(self, job_id: str) -> typing.Optional[ApiJob]:
        # None when no job with this id exists.
        return get_api_job_by_id(job_id)
|
|
|
@ -1,26 +0,0 @@
|
||||||
"""Enums representing different service providers."""
|
|
||||||
from enum import Enum
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
|
|
||||||
# Supported DNS hosting providers.
@strawberry.enum
class DnsProvider(Enum):
    CLOUDFLARE = "CLOUDFLARE"
    DIGITALOCEAN = "DIGITALOCEAN"
    DESEC = "DESEC"
|
|
||||||
|
|
||||||
|
|
||||||
# Supported server hosting providers; OTHER covers self-hosted/unknown setups.
@strawberry.enum
class ServerProvider(Enum):
    HETZNER = "HETZNER"
    DIGITALOCEAN = "DIGITALOCEAN"
    OTHER = "OTHER"
|
|
||||||
|
|
||||||
|
|
||||||
# Supported backup storage providers.
@strawberry.enum
class BackupProvider(Enum):
    BACKBLAZE = "BACKBLAZE"
    NONE = "NONE"
    # for testing purposes, make sure not selectable in prod.
    MEMORY = "MEMORY"
    FILE = "FILE"
|
|
|
@ -1,18 +0,0 @@
|
||||||
"""Services status"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.service import (
|
|
||||||
Service,
|
|
||||||
service_to_graphql_service,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.services import get_all_services
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Services:
    @strawberry.field
    def all_services(self) -> typing.List[Service]:
        # Map every installed service onto its GraphQL representation.
        return [
            service_to_graphql_service(installed)
            for installed in get_all_services()
        ]
|
|
|
@ -1,33 +0,0 @@
|
||||||
"""Storage queries."""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.service import (
|
|
||||||
StorageVolume,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.utils.block_devices import BlockDevices
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Storage:
    """GraphQL queries to get storage information."""

    @strawberry.field
    def volumes(self) -> typing.List[StorageVolume]:
        """Get list of volumes"""
        volumes = []
        for device in BlockDevices().get_block_devices():
            # Prefer the filesystem size; fall back to the raw device size.
            if device.fssize is not None:
                total = str(device.fssize)
            else:
                total = str(device.size)
            volumes.append(
                StorageVolume(
                    total_space=total,
                    free_space=str(device.fsavail),
                    used_space=str(device.fsused),
                    root=device.is_root(),
                    name=device.name,
                    model=device.model,
                    serial=device.serial,
                    type=device.type,
                )
            )
        return volumes
|
|
|
@ -1,171 +0,0 @@
|
||||||
"""Common system information and settings"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import os
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql.common_types.dns import DnsRecord
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.queries.common import Alert, Severity
|
|
||||||
from selfprivacy_api.graphql.queries.providers import DnsProvider, ServerProvider
|
|
||||||
from selfprivacy_api.jobs import Jobs
|
|
||||||
from selfprivacy_api.jobs.migrate_to_binds import is_bind_migrated
|
|
||||||
from selfprivacy_api.services import get_all_required_dns_records
|
|
||||||
from selfprivacy_api.utils import ReadUserData
|
|
||||||
import selfprivacy_api.actions.system as system_actions
|
|
||||||
import selfprivacy_api.actions.ssh as ssh_actions
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class SystemDomainInfo:
|
|
||||||
"""Information about the system domain"""
|
|
||||||
|
|
||||||
domain: str
|
|
||||||
hostname: str
|
|
||||||
provider: DnsProvider
|
|
||||||
|
|
||||||
@strawberry.field
|
|
||||||
def required_dns_records(self) -> typing.List[DnsRecord]:
|
|
||||||
"""Collect all required DNS records for all services"""
|
|
||||||
return [
|
|
||||||
DnsRecord(
|
|
||||||
record_type=record.type,
|
|
||||||
name=record.name,
|
|
||||||
content=record.content,
|
|
||||||
ttl=record.ttl,
|
|
||||||
priority=record.priority,
|
|
||||||
display_name=record.display_name,
|
|
||||||
)
|
|
||||||
for record in get_all_required_dns_records()
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def get_system_domain_info() -> SystemDomainInfo:
|
|
||||||
"""Get basic system domain info"""
|
|
||||||
with ReadUserData() as user_data:
|
|
||||||
return SystemDomainInfo(
|
|
||||||
domain=user_data["domain"],
|
|
||||||
hostname=user_data["hostname"],
|
|
||||||
provider=user_data["dns"]["provider"],
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class AutoUpgradeOptions:
|
|
||||||
"""Automatic upgrade options"""
|
|
||||||
|
|
||||||
enable: bool
|
|
||||||
allow_reboot: bool
|
|
||||||
|
|
||||||
|
|
||||||
def get_auto_upgrade_options() -> AutoUpgradeOptions:
|
|
||||||
"""Get automatic upgrade options"""
|
|
||||||
settings = system_actions.get_auto_upgrade_settings()
|
|
||||||
return AutoUpgradeOptions(
|
|
||||||
enable=settings.enable,
|
|
||||||
allow_reboot=settings.allowReboot,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class SshSettings:
|
|
||||||
"""SSH settings and root SSH keys"""
|
|
||||||
|
|
||||||
enable: bool
|
|
||||||
password_authentication: bool
|
|
||||||
root_ssh_keys: typing.List[str]
|
|
||||||
|
|
||||||
|
|
||||||
def get_ssh_settings() -> SshSettings:
|
|
||||||
"""Get SSH settings"""
|
|
||||||
settings = ssh_actions.get_ssh_settings()
|
|
||||||
return SshSettings(
|
|
||||||
enable=settings.enable,
|
|
||||||
password_authentication=settings.passwordAuthentication,
|
|
||||||
root_ssh_keys=settings.rootKeys,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_system_timezone() -> str:
|
|
||||||
"""Get system timezone"""
|
|
||||||
return system_actions.get_timezone()
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class SystemSettings:
|
|
||||||
"""Common system settings"""
|
|
||||||
|
|
||||||
auto_upgrade: AutoUpgradeOptions = strawberry.field(
|
|
||||||
resolver=get_auto_upgrade_options
|
|
||||||
)
|
|
||||||
ssh: SshSettings = strawberry.field(resolver=get_ssh_settings)
|
|
||||||
timezone: str = strawberry.field(resolver=get_system_timezone)
|
|
||||||
|
|
||||||
|
|
||||||
def get_system_version() -> str:
|
|
||||||
"""Get system version"""
|
|
||||||
return system_actions.get_system_version()
|
|
||||||
|
|
||||||
|
|
||||||
def get_python_version() -> str:
|
|
||||||
"""Get Python version"""
|
|
||||||
return system_actions.get_python_version()
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class SystemInfo:
|
|
||||||
"""System components versions"""
|
|
||||||
|
|
||||||
system_version: str = strawberry.field(resolver=get_system_version)
|
|
||||||
python_version: str = strawberry.field(resolver=get_python_version)
|
|
||||||
|
|
||||||
@strawberry.field
|
|
||||||
def using_binds(self) -> bool:
|
|
||||||
"""Check if the system is using BINDs"""
|
|
||||||
return is_bind_migrated()
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class SystemProviderInfo:
|
|
||||||
"""Information about the VPS/Dedicated server provider"""
|
|
||||||
|
|
||||||
provider: ServerProvider
|
|
||||||
id: str
|
|
||||||
|
|
||||||
|
|
||||||
def get_system_provider_info() -> SystemProviderInfo:
|
|
||||||
"""Get system provider info"""
|
|
||||||
with ReadUserData() as user_data:
|
|
||||||
return SystemProviderInfo(
|
|
||||||
provider=user_data["server"]["provider"],
|
|
||||||
id="UNKNOWN",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
|
|
||||||
class System:
|
|
||||||
"""
|
|
||||||
Base system type which represents common system status
|
|
||||||
"""
|
|
||||||
|
|
||||||
status: Alert = strawberry.field(
|
|
||||||
resolver=lambda: Alert(
|
|
||||||
severity=Severity.INFO,
|
|
||||||
title="Test message",
|
|
||||||
message="Test message",
|
|
||||||
timestamp=None,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
domain_info: SystemDomainInfo = strawberry.field(resolver=get_system_domain_info)
|
|
||||||
settings: SystemSettings = SystemSettings()
|
|
||||||
info: SystemInfo = SystemInfo()
|
|
||||||
provider: SystemProviderInfo = strawberry.field(resolver=get_system_provider_info)
|
|
||||||
|
|
||||||
@strawberry.field
|
|
||||||
def busy(self) -> bool:
|
|
||||||
"""Check if the system is busy"""
|
|
||||||
return Jobs.is_busy()
|
|
||||||
|
|
||||||
@strawberry.field
|
|
||||||
def working_directory(self) -> str:
|
|
||||||
"""Get working directory"""
|
|
||||||
return os.getcwd()
|
|
|
@ -1,23 +0,0 @@
|
||||||
"""Users"""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
import typing
|
|
||||||
import strawberry
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.user import (
|
|
||||||
User,
|
|
||||||
get_user_by_username,
|
|
||||||
get_users,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Users:
    # Namespace type grouping user-related GraphQL queries.
    # Every field requires an authenticated caller.

    all_users: typing.List[User] = strawberry.field(
        permission_classes=[IsAuthenticated], resolver=get_users
    )

    @strawberry.field(permission_classes=[IsAuthenticated])
    def get_user(self, username: str) -> typing.Optional[User]:
        """Get users"""
        found = get_user_by_username(username)
        return found
|
|
|
@ -1,150 +0,0 @@
|
||||||
"""GraphQL API for SelfPrivacy."""
|
|
||||||
# pylint: disable=too-few-public-methods
|
|
||||||
|
|
||||||
import asyncio
|
|
||||||
from typing import AsyncGenerator
|
|
||||||
import strawberry
|
|
||||||
from selfprivacy_api.graphql import IsAuthenticated
|
|
||||||
from selfprivacy_api.graphql.mutations.deprecated_mutations import (
|
|
||||||
DeprecatedApiMutations,
|
|
||||||
DeprecatedJobMutations,
|
|
||||||
DeprecatedServicesMutations,
|
|
||||||
DeprecatedStorageMutations,
|
|
||||||
DeprecatedSystemMutations,
|
|
||||||
DeprecatedUsersMutations,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.graphql.mutations.api_mutations import ApiMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.mutation_interface import GenericMutationReturn
|
|
||||||
from selfprivacy_api.graphql.mutations.services_mutations import ServicesMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.system_mutations import SystemMutations
|
|
||||||
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.queries.api_queries import Api
|
|
||||||
from selfprivacy_api.graphql.queries.backup import Backup
|
|
||||||
from selfprivacy_api.graphql.queries.jobs import Job
|
|
||||||
from selfprivacy_api.graphql.queries.services import Services
|
|
||||||
from selfprivacy_api.graphql.queries.storage import Storage
|
|
||||||
from selfprivacy_api.graphql.queries.system import System
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations
|
|
||||||
from selfprivacy_api.graphql.queries.users import Users
|
|
||||||
from selfprivacy_api.jobs.test import test_job
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Query:
    """Root schema for queries"""

    # Each resolver returns a namespace object that groups a family of
    # queries.  Everything except `api` requires authentication.

    @strawberry.field
    def api(self) -> Api:
        """API access status"""
        return Api()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def system(self) -> System:
        """System queries"""
        return System()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def users(self) -> Users:
        """Users queries"""
        return Users()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def storage(self) -> Storage:
        """Storage queries"""
        return Storage()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def jobs(self) -> Job:
        """Jobs queries"""
        return Job()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def services(self) -> Services:
        """Services queries"""
        return Services()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def backup(self) -> Backup:
        """Backup queries"""
        return Backup()
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Mutation(
    DeprecatedApiMutations,
    DeprecatedSystemMutations,
    DeprecatedUsersMutations,
    DeprecatedStorageMutations,
    DeprecatedServicesMutations,
    DeprecatedJobMutations,
):
    """Root schema for mutations"""

    # Deprecated flat mutations are inherited from the bases above; the
    # fields below expose the namespaced replacements.  Everything except
    # `api` requires authentication.
    # Fix: removed a stray dead `pass` statement that followed
    # test_mutation in the class body.

    @strawberry.field
    def api(self) -> ApiMutations:
        """API mutations"""
        return ApiMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def system(self) -> SystemMutations:
        """System mutations"""
        return SystemMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def users(self) -> UsersMutations:
        """Users mutations"""
        return UsersMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def storage(self) -> StorageMutations:
        """Storage mutations"""
        return StorageMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def services(self) -> ServicesMutations:
        """Services mutations"""
        return ServicesMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def jobs(self) -> JobMutations:
        """Jobs mutations"""
        return JobMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def backup(self) -> BackupMutations:
        """Backup mutations"""
        return BackupMutations()

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def test_mutation(self) -> GenericMutationReturn:
        """Test mutation"""
        # Kicks off a throwaway background job for testing the job system.
        test_job()
        return GenericMutationReturn(
            success=True,
            message="Test mutation",
            code=200,
        )
|
|
||||||
|
|
||||||
|
|
||||||
@strawberry.type
class Subscription:
    """Root schema for subscriptions"""

    @strawberry.subscription(permission_classes=[IsAuthenticated])
    async def count(self, target: int = 100) -> AsyncGenerator[int, None]:
        # Emit 0..target-1, one value every half second.
        current = 0
        while current < target:
            yield current
            await asyncio.sleep(0.5)
            current += 1
|
|
||||||
|
|
||||||
|
|
||||||
# Assemble the executable GraphQL schema from the three root types.
schema = strawberry.Schema(
    query=Query,
    mutation=Mutation,
    subscription=Subscription,
)
|
|
|
@ -1,323 +0,0 @@
|
||||||
"""
|
|
||||||
Jobs controller. It handles the jobs that are created by the user.
|
|
||||||
This is a singleton class holding the jobs list.
|
|
||||||
Jobs can be added and removed.
|
|
||||||
A single job can be updated.
|
|
||||||
A job is a dictionary with the following keys:
|
|
||||||
- id: unique identifier of the job
|
|
||||||
- name: name of the job
|
|
||||||
- description: description of the job
|
|
||||||
- status: status of the job
|
|
||||||
- created_at: date of creation of the job, naive localtime
|
|
||||||
- updated_at: date of last update of the job, naive localtime
|
|
||||||
- finished_at: date of finish of the job
|
|
||||||
- error: error message if the job failed
|
|
||||||
- result: result of the job
|
|
||||||
"""
|
|
||||||
import typing
|
|
||||||
import datetime
|
|
||||||
from uuid import UUID
|
|
||||||
import uuid
|
|
||||||
from enum import Enum
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from selfprivacy_api.utils.redis_pool import RedisPool
|
|
||||||
|
|
||||||
# TTL applied to finished/errored jobs so redis does not grow forever.
JOB_EXPIRATION_SECONDS = 10 * 24 * 60 * 60  # ten days

# Redis key prefixes for the per-job status/progress history lists.
STATUS_LOGS_PREFIX = "jobs_logs:status:"
PROGRESS_LOGS_PREFIX = "jobs_logs:progress:"
|
|
||||||
|
|
||||||
|
|
||||||
class JobStatus(str, Enum):
    """
    Status of a job.

    Inherits from str so values serialize naturally to redis/JSON.
    """

    CREATED = "CREATED"    # registered, not yet started
    RUNNING = "RUNNING"    # currently executing
    FINISHED = "FINISHED"  # completed successfully
    ERROR = "ERROR"        # failed; details in Job.error
|
|
||||||
|
|
||||||
|
|
||||||
class Job(BaseModel):
    """
    Job class.

    Pydantic model describing a single background job as stored in redis.
    """

    # Unique identifier of the job.
    uid: UUID
    # Machine-readable job family, e.g. "migrations.migrate_to_binds".
    type_id: str
    # Human-readable name and description.
    name: str
    description: str
    # Current lifecycle state plus an optional human-readable detail line.
    status: JobStatus
    status_text: typing.Optional[str]
    # Completion percentage (0-100), if the job reports progress.
    progress: typing.Optional[int]
    # Timestamps are naive localtime (see module docstring).
    created_at: datetime.datetime
    updated_at: datetime.datetime
    finished_at: typing.Optional[datetime.datetime]
    # Error message on failure; free-form result on success.
    error: typing.Optional[str]
    result: typing.Optional[str]
|
|
||||||
|
|
||||||
|
|
||||||
class Jobs:
    """
    Jobs class.

    Static interface to the redis-backed job registry.  Jobs live as
    redis hashes under "jobs:<uuid>"; short-lived histories of status and
    progress changes are kept under separate "jobs_logs:*" list keys.
    """

    @staticmethod
    def reset() -> None:
        """
        Reset the jobs list.
        """
        for job in Jobs.get_jobs():
            Jobs.remove(job)
        Jobs.reset_logs()

    @staticmethod
    def add(
        name: str,
        type_id: str,
        description: str,
        status: JobStatus = JobStatus.CREATED,
        status_text: str = "",
        progress: int = 0,
    ) -> Job:
        """
        Add a job to the jobs list.

        Returns the new Job, already persisted to redis.
        """
        now = datetime.datetime.now()
        job = Job(
            uid=uuid.uuid4(),
            name=name,
            type_id=type_id,
            description=description,
            status=status,
            status_text=status_text,
            progress=progress,
            created_at=now,
            updated_at=now,
            finished_at=None,
            error=None,
            result=None,
        )
        redis = RedisPool().get_connection()
        _store_job_as_hash(redis, _redis_key_from_uuid(job.uid), job)
        return job

    @staticmethod
    def remove(job: Job) -> None:
        """
        Remove a job from the jobs list.
        """
        Jobs.remove_by_uid(str(job.uid))

    @staticmethod
    def remove_by_uid(job_uuid: str) -> bool:
        """
        Remove a job from the jobs list.

        Returns True if a job with this uid existed and was deleted.
        """
        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(job_uuid)
        if redis.exists(key):
            redis.delete(key)
            return True
        return False

    @staticmethod
    def reset_logs() -> None:
        """Delete all recorded status and progress histories."""
        redis = RedisPool().get_connection()
        # Fix: previously only status logs were cleared, leaking
        # "jobs_logs:progress:*" keys on every reset.
        for prefix in (STATUS_LOGS_PREFIX, PROGRESS_LOGS_PREFIX):
            for key in redis.keys(prefix + "*"):
                redis.delete(key)

    @staticmethod
    def log_status_update(job: Job, status: JobStatus) -> None:
        """Prepend a status change to the job's short-lived history."""
        redis = RedisPool().get_connection()
        key = _status_log_key_from_uuid(job.uid)
        redis.lpush(key, status.value)
        # The history is deliberately ephemeral (10 s TTL).
        redis.expire(key, 10)

    @staticmethod
    def log_progress_update(job: Job, progress: int) -> None:
        """Prepend a progress change to the job's short-lived history."""
        redis = RedisPool().get_connection()
        key = _progress_log_key_from_uuid(job.uid)
        redis.lpush(key, progress)
        redis.expire(key, 10)

    @staticmethod
    def status_updates(job: Job) -> list[JobStatus]:
        """Return the recorded status history of a job (may be empty)."""
        redis = RedisPool().get_connection()
        key = _status_log_key_from_uuid(job.uid)
        if not redis.exists(key):
            return []

        result: list[JobStatus] = []
        status_strings: list[str] = redis.lrange(key, 0, -1)  # type: ignore
        for status in status_strings:
            try:
                result.append(JobStatus[status])
            except KeyError as error:
                raise ValueError("impossible job status: " + status) from error
        return result

    @staticmethod
    def progress_updates(job: Job) -> list[int]:
        """Return the recorded progress history of a job (may be empty)."""
        redis = RedisPool().get_connection()
        key = _progress_log_key_from_uuid(job.uid)
        if not redis.exists(key):
            return []

        result: list[int] = []
        progress_strings: list[str] = redis.lrange(key, 0, -1)  # type: ignore
        for progress in progress_strings:
            try:
                result.append(int(progress))
            # Fix: int() raises ValueError, not KeyError — the original
            # handler was unreachable and malformed entries propagated
            # a raw ValueError without the explanatory message.
            except ValueError as error:
                raise ValueError("impossible job progress: " + progress) from error
        return result

    @staticmethod
    def update(
        job: Job,
        status: JobStatus,
        status_text: typing.Optional[str] = None,
        progress: typing.Optional[int] = None,
        name: typing.Optional[str] = None,
        description: typing.Optional[str] = None,
        error: typing.Optional[str] = None,
        result: typing.Optional[str] = None,
    ) -> Job:
        """
        Update a job in the jobs list.

        Mutates the given Job in place, logs status/progress changes and
        persists the job back to redis if it is still stored there.
        Finished and errored jobs get an expiration applied.
        """
        if name is not None:
            job.name = name
        if description is not None:
            job.description = description
        if status_text is not None:
            job.status_text = status_text

        # if it is finished it is 100
        # unless user says otherwise
        if status == JobStatus.FINISHED and progress is None:
            progress = 100
        if progress is not None and job.progress != progress:
            job.progress = progress
            Jobs.log_progress_update(job, progress)

        job.status = status
        Jobs.log_status_update(job, status)
        job.updated_at = datetime.datetime.now()
        # NOTE: error/result are overwritten unconditionally — passing
        # None clears any previous value (pre-existing behavior).
        job.error = error
        job.result = result
        if status in (JobStatus.FINISHED, JobStatus.ERROR):
            job.finished_at = datetime.datetime.now()

        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(job.uid)
        if redis.exists(key):
            _store_job_as_hash(redis, key, job)
            if status in (JobStatus.FINISHED, JobStatus.ERROR):
                redis.expire(key, JOB_EXPIRATION_SECONDS)

        return job

    @staticmethod
    def set_expiration(job: Job, expiration_seconds: int) -> Job:
        """Apply a TTL to the stored job; returns the job unchanged."""
        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(job.uid)
        if redis.exists(key):
            redis.expire(key, expiration_seconds)
        return job

    @staticmethod
    def get_job(uid: str) -> typing.Optional[Job]:
        """
        Get a job from the jobs list.
        """
        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(uid)
        if redis.exists(key):
            return _job_from_hash(redis, key)
        return None

    @staticmethod
    def get_jobs() -> typing.List[Job]:
        """
        Get the jobs list.
        """
        redis = RedisPool().get_connection()
        job_keys = redis.keys("jobs:*")
        jobs = []
        for job_key in job_keys:
            job = _job_from_hash(redis, job_key)
            if job is not None:
                jobs.append(job)
        return jobs

    @staticmethod
    def is_busy() -> bool:
        """
        Check if there is a job running.
        """
        return any(job.status == JobStatus.RUNNING for job in Jobs.get_jobs())
|
|
||||||
|
|
||||||
|
|
||||||
def report_progress(progress: int, job: Job, status_text: str) -> None:
    """
    A terse way to call a common operation, for readability
    job.report_progress() would be even better
    but it would go against how this file is written
    """
    # Thin convenience wrapper: mark the job RUNNING with fresh
    # progress/status text in a single call.
    Jobs.update(
        job=job,
        progress=progress,
        status_text=status_text,
        status=JobStatus.RUNNING,
    )
|
|
||||||
|
|
||||||
|
|
||||||
def _redis_key_from_uuid(uuid_string) -> str:
    # Namespace job hashes under "jobs:".
    return f"jobs:{uuid_string}"
|
|
||||||
|
|
||||||
|
|
||||||
def _status_log_key_from_uuid(uuid_string) -> str:
    # List key holding the job's status-change history.
    return f"{STATUS_LOGS_PREFIX}{uuid_string}"
|
|
||||||
|
|
||||||
|
|
||||||
def _progress_log_key_from_uuid(uuid_string) -> str:
    # List key holding the job's progress-change history.
    return f"{PROGRESS_LOGS_PREFIX}{uuid_string}"
|
|
||||||
|
|
||||||
|
|
||||||
def _store_job_as_hash(redis, redis_key, model) -> None:
    # Persist a pydantic model as a redis hash: every field becomes a
    # string-valued hash entry, with UUID/datetime/JobStatus fields
    # converted to their canonical string forms first.
    for field_name, field_value in model.dict().items():
        if isinstance(field_value, uuid.UUID):
            field_value = str(field_value)
        if isinstance(field_value, datetime.datetime):
            field_value = field_value.isoformat()
        if isinstance(field_value, JobStatus):
            field_value = field_value.value
        redis.hset(redis_key, field_name, str(field_value))
|
|
||||||
|
|
||||||
|
|
||||||
def _job_from_hash(redis, redis_key) -> typing.Optional[Job]:
    # Rehydrate a Job model from its redis hash, or None if the key is gone.
    if not redis.exists(redis_key):
        return None

    job_dict = redis.hgetall(redis_key)
    # Datetimes were stored in ISO format; "None" marks a missing value.
    for date_field in ("created_at", "updated_at", "finished_at"):
        if job_dict[date_field] != "None":
            job_dict[date_field] = datetime.datetime.fromisoformat(
                job_dict[date_field]
            )
    # Any remaining "None" strings map back to actual None values.
    for field_name in job_dict:
        if job_dict[field_name] == "None":
            job_dict[field_name] = None

    return Job(**job_dict)
|
|
|
@ -1,329 +0,0 @@
|
||||||
"""Function to perform migration of app data to binds."""
|
|
||||||
import subprocess
|
|
||||||
import pathlib
|
|
||||||
import shutil
|
|
||||||
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from selfprivacy_api.jobs import Job, JobStatus, Jobs
|
|
||||||
from selfprivacy_api.services.bitwarden import Bitwarden
|
|
||||||
from selfprivacy_api.services.gitea import Gitea
|
|
||||||
from selfprivacy_api.services.mailserver import MailServer
|
|
||||||
from selfprivacy_api.services.nextcloud import Nextcloud
|
|
||||||
from selfprivacy_api.services.pleroma import Pleroma
|
|
||||||
from selfprivacy_api.utils import ReadUserData, WriteUserData
|
|
||||||
from selfprivacy_api.utils.huey import huey
|
|
||||||
from selfprivacy_api.utils.block_devices import BlockDevices
|
|
||||||
|
|
||||||
|
|
||||||
class BindMigrationConfig(BaseModel):
    """Config for bind migration.
    For each service provide block device name.
    """

    # Name of the block device (e.g. "sdb") that will hold each
    # service's data after migration.
    email_block_device: str
    bitwarden_block_device: str
    gitea_block_device: str
    nextcloud_block_device: str
    pleroma_block_device: str
|
|
||||||
|
|
||||||
|
|
||||||
def is_bind_migrated() -> bool:
    """Check if bind migration was performed."""
    # The migration sets the "useBinds" flag in userdata when it runs.
    with ReadUserData() as data:
        migrated = data.get("useBinds", False)
    return migrated
|
|
||||||
|
|
||||||
|
|
||||||
def activate_binds(config: BindMigrationConfig):
    """Activate binds.

    Records each service's target block device in userdata and flips the
    "useBinds" flag.  Refactored from five copy-pasted if-blocks into a
    single data-driven loop; key insertion order is unchanged.
    """
    # Userdata section name -> block device from the migration config.
    locations = {
        "email": config.email_block_device,
        "bitwarden": config.bitwarden_block_device,
        "gitea": config.gitea_block_device,
        "nextcloud": config.nextcloud_block_device,
        "pleroma": config.pleroma_block_device,
    }
    with WriteUserData() as user_data:
        for section, device in locations.items():
            # Create the section if missing, then record the location.
            user_data.setdefault(section, {})["location"] = device
        user_data["useBinds"] = True
|
|
||||||
|
|
||||||
|
|
||||||
def move_folder(
    data_path: pathlib.Path, bind_path: pathlib.Path, user: str, group: str
):
    """Move folder from data to bind."""
    # Nothing to do when the source folder does not exist.
    if not data_path.exists():
        return
    shutil.move(str(data_path), str(bind_path))

    # Recreate the (now empty) original path as the mount point.
    try:
        data_path.mkdir(mode=0o750, parents=True, exist_ok=True)
    except Exception as mkdir_error:
        print(f"Error creating data path: {mkdir_error}")
        return

    # Best effort: the user/group may not exist yet on this system.
    try:
        shutil.chown(str(bind_path), user=user, group=group)
        shutil.chown(str(data_path), user=user, group=group)
    except LookupError:
        pass

    # Bind-mount the moved data back onto the original location.
    try:
        subprocess.run(["mount", "--bind", str(bind_path), str(data_path)], check=True)
    except subprocess.CalledProcessError as mount_error:
        print(mount_error)

    # Recursively fix ownership of everything under the mount.
    try:
        subprocess.run(["chown", "-R", f"{user}:{group}", str(data_path)], check=True)
    except subprocess.CalledProcessError as chown_error:
        print(chown_error)
|
|
||||||
|
|
||||||
|
|
||||||
@huey.task()
def migrate_to_binds(config: BindMigrationConfig, job: Job):
    """Migrate app data to binds.

    Runs as a huey background task.  Validates that every configured
    block device exists and is mounted under /volumes/<name>, activates
    binds in userdata, then for each service stops it, moves its data
    folder to the target volume, and restarts it.  Progress is reported
    through the given Job.
    """

    # Exit if migration is already done
    if is_bind_migrated():
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
            error="Migration already done.",
        )
        return

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=0,
        status_text="Checking if all volumes are available.",
    )
    # Get block devices.
    block_devices = BlockDevices().get_block_devices()
    block_device_names = [device.name for device in block_devices]

    # Get all unique required block devices
    required_block_devices = []
    for block_device_name in config.__dict__.values():
        if block_device_name not in required_block_devices:
            required_block_devices.append(block_device_name)

    # Check if all block devices from config are present.
    for block_device_name in required_block_devices:
        if block_device_name not in block_device_names:
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error=f"Block device {block_device_name} not found.",
            )
            return

    # Make sure all required block devices are mounted.
    # sda1 is the root partition and is always mounted.
    for block_device_name in required_block_devices:
        if block_device_name == "sda1":
            continue
        block_device = BlockDevices().get_block_device(block_device_name)
        if block_device is None:
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error=f"Block device {block_device_name} not found.",
            )
            return
        if f"/volumes/{block_device_name}" not in block_device.mountpoints:
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error=f"Block device {block_device_name} not mounted.",
            )
            return

    # Make sure /volumes/sda1 exists.
    pathlib.Path("/volumes/sda1").mkdir(parents=True, exist_ok=True)

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=5,
        status_text="Activating binds in NixOS config.",
    )

    activate_binds(config)

    # Perform migration of Nextcloud.
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=10,
        status_text="Migrating Nextcloud.",
    )

    Nextcloud().stop()

    # If /volumes/sda1/nextcloud or /volumes/sdb/nextcloud exists, skip it.
    # NOTE(review): "sda1"/"sdb" are hard-coded legacy locations here and
    # below — an existing folder on either means data was already moved.
    if not pathlib.Path("/volumes/sda1/nextcloud").exists():
        if not pathlib.Path("/volumes/sdb/nextcloud").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/nextcloud"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.nextcloud_block_device}/nextcloud"
                ),
                user="nextcloud",
                group="nextcloud",
            )

    # Start Nextcloud
    Nextcloud().start()

    # Perform migration of Bitwarden

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=28,
        status_text="Migrating Bitwarden.",
    )

    Bitwarden().stop()

    if not pathlib.Path("/volumes/sda1/bitwarden").exists():
        if not pathlib.Path("/volumes/sdb/bitwarden").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/bitwarden"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.bitwarden_block_device}/bitwarden"
                ),
                user="vaultwarden",
                group="vaultwarden",
            )

    # Both historical bitwarden data directories are migrated.
    if not pathlib.Path("/volumes/sda1/bitwarden_rs").exists():
        if not pathlib.Path("/volumes/sdb/bitwarden_rs").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/bitwarden_rs"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.bitwarden_block_device}/bitwarden_rs"
                ),
                user="vaultwarden",
                group="vaultwarden",
            )

    # Start Bitwarden
    Bitwarden().start()

    # Perform migration of Gitea

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=46,
        status_text="Migrating Gitea.",
    )

    Gitea().stop()

    if not pathlib.Path("/volumes/sda1/gitea").exists():
        if not pathlib.Path("/volumes/sdb/gitea").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/gitea"),
                bind_path=pathlib.Path(f"/volumes/{config.gitea_block_device}/gitea"),
                user="gitea",
                group="gitea",
            )

    Gitea().start()

    # Perform migration of Mail server

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=64,
        status_text="Migrating Mail server.",
    )

    MailServer().stop()

    if not pathlib.Path("/volumes/sda1/vmail").exists():
        if not pathlib.Path("/volumes/sdb/vmail").exists():
            move_folder(
                data_path=pathlib.Path("/var/vmail"),
                bind_path=pathlib.Path(f"/volumes/{config.email_block_device}/vmail"),
                user="virtualMail",
                group="virtualMail",
            )

    if not pathlib.Path("/volumes/sda1/sieve").exists():
        if not pathlib.Path("/volumes/sdb/sieve").exists():
            move_folder(
                data_path=pathlib.Path("/var/sieve"),
                bind_path=pathlib.Path(f"/volumes/{config.email_block_device}/sieve"),
                user="virtualMail",
                group="virtualMail",
            )

    MailServer().start()

    # Perform migration of Pleroma

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=82,
        status_text="Migrating Pleroma.",
    )

    Pleroma().stop()

    if not pathlib.Path("/volumes/sda1/pleroma").exists():
        if not pathlib.Path("/volumes/sdb/pleroma").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/pleroma"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.pleroma_block_device}/pleroma"
                ),
                user="pleroma",
                group="pleroma",
            )

    # Pleroma's postgres database moves to the same volume as Pleroma.
    if not pathlib.Path("/volumes/sda1/postgresql").exists():
        if not pathlib.Path("/volumes/sdb/postgresql").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/postgresql"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.pleroma_block_device}/postgresql"
                ),
                user="postgres",
                group="postgres",
            )

    Pleroma().start()

    Jobs.update(
        job=job,
        status=JobStatus.FINISHED,
        progress=100,
        status_text="Migration finished.",
        result="Migration finished.",
    )
|
|
||||||
|
|
||||||
|
|
||||||
def start_bind_migration(config: BindMigrationConfig) -> Job:
    """Start migration.

    Registers the bind-migration job, runs the migration synchronously
    and returns the job so the caller can inspect its final status.
    """
    migration_job = Jobs.add(
        type_id="migrations.migrate_to_binds",
        name="Migrate to binds",
        description="Migration required to use the new disk space management.",
    )
    migrate_to_binds(config, migration_job)
    return migration_job
|
|
|
@ -1,147 +0,0 @@
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
from typing import Tuple, Iterable
|
|
||||||
|
|
||||||
from selfprivacy_api.utils.huey import huey
|
|
||||||
|
|
||||||
from selfprivacy_api.jobs import JobStatus, Jobs, Job
|
|
||||||
|
|
||||||
|
|
||||||
class ShellException(Exception):
    """Shell-related errors: raised when output from a shell command
    (here: nix) cannot be parsed."""
|
|
||||||
|
|
||||||
|
|
||||||
COMPLETED_WITH_ERROR = "Error occurred, please report this to the support chat."
|
|
||||||
RESULT_WAS_NOT_FOUND_ERROR = (
|
|
||||||
"We are sorry, garbage collection result was not found. "
|
|
||||||
"Something went wrong, please report this to the support chat."
|
|
||||||
)
|
|
||||||
CLEAR_COMPLETED = "Garbage collection completed."
|
|
||||||
|
|
||||||
|
|
||||||
def delete_old_gens_and_return_dead_report() -> str:
    """Delete old system generations, then report dead store paths.

    First runs ``nix-env --delete-generations old`` on the system profile
    so that superseded generations no longer pin store paths, then asks
    ``nix-store --gc --print-dead`` which paths a garbage collection
    would delete.

    Returns:
        The raw ``--print-dead`` output (one store path per line), or a
        single space if nix produced no output.
    """
    subprocess.run(
        [
            "nix-env",
            "-p",
            "/nix/var/nix/profiles/system",
            # BUG FIX: the flag and its value must be separate argv
            # elements; "--delete-generations old" as a single string is
            # one unrecognized argument and nix-env rejects it.
            "--delete-generations",
            "old",
        ],
        check=False,
    )

    result = subprocess.check_output(["nix-store", "--gc", "--print-dead"]).decode(
        "utf-8"
    )

    # check_output never returns None; kept as a defensive fallback.
    return " " if result is None else result
|
|
||||||
|
|
||||||
|
|
||||||
def run_nix_collect_garbage() -> Iterable[bytes]:
    """Launch ``nix-store --gc`` and stream its combined stdout/stderr.

    Returns an iterable over the raw output lines, or an empty iterator
    if the process exposes no stdout pipe.
    """
    collector = subprocess.Popen(
        ["nix-store", "--gc"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    if not collector.stdout:
        return iter([])
    return collector.stdout
|
|
||||||
|
|
||||||
|
|
||||||
def parse_line(job: Job, line: str) -> Job:
    """
    We parse the string for the presence of a final line,
    with the final amount of space cleared.
    Simply put, we're just looking for a similar string:
    "1537 store paths deleted, 339.84 MiB freed".

    Raises ShellException when no freed-space figure is present.
    """
    freed_match = re.search(r"[+-]?\d+\.\d+ \w+(?= freed)", line)

    if freed_match is None:
        raise ShellException("nix returned gibberish output")

    Jobs.update(
        job=job,
        status=JobStatus.FINISHED,
        status_text=CLEAR_COMPLETED,
        result=f"{freed_match.group(0)} have been cleared",
    )
    return job
|
|
||||||
|
|
||||||
|
|
||||||
def process_stream(job: Job, stream: Iterable[bytes], total_dead_packages: int) -> None:
    """Follow the garbage collector output and mirror progress into the job.

    Counts "deleting '/nix/store/..." lines against total_dead_packages
    to estimate progress (reported at most every 5%); the final summary
    line is handed to parse_line(), which finishes the job.
    """
    deleted_so_far = 0
    last_reported_percent = 0

    for raw_line in stream:
        text = raw_line.decode("utf-8")

        if "deleting '/nix/store/" in text:
            deleted_so_far += 1
            percent = int((deleted_so_far / total_dead_packages) * 100)

            # Throttle updates so the job store is not spammed per-path.
            if percent - last_reported_percent >= 5:
                Jobs.update(
                    job=job,
                    status=JobStatus.RUNNING,
                    progress=percent,
                    status_text="Cleaning...",
                )
                last_reported_percent = percent

        elif "store paths deleted," in text:
            parse_line(job, text)
|
|
||||||
|
|
||||||
|
|
||||||
def get_dead_packages(output) -> Tuple[int, float]:
    """Count dead store paths in ``--print-dead`` output.

    Returns (number of dead paths, percentage of the total that a single
    path represents); the percentage is 0 when nothing is dead.
    """
    dead_count = output.count("/nix/store/")
    if dead_count == 0:
        return 0, 0
    return dead_count, 100 / dead_count
|
|
||||||
|
|
||||||
|
|
||||||
@huey.task()
def calculate_and_clear_dead_paths(job: Job):
    """Huey task: count dead nix store paths, then collect the garbage.

    Finishes the job early when there is nothing to clear; marks the job
    errored when the collector output cannot be parsed.
    """
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=0,
        status_text="Calculate the number of dead packages...",
    )

    dead_packages, package_equal_to_percent = get_dead_packages(
        delete_old_gens_and_return_dead_report()
    )

    if dead_packages == 0:
        Jobs.update(
            job=job,
            status=JobStatus.FINISHED,
            status_text="Nothing to clear",
            result="System is clear",
        )
        return True

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=0,
        status_text=f"Found {dead_packages} packages to remove!",
    )

    output_stream = run_nix_collect_garbage()
    try:
        process_stream(job, output_stream, dead_packages)
    except ShellException:
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
            status_text=COMPLETED_WITH_ERROR,
            error=RESULT_WAS_NOT_FOUND_ERROR,
        )
|
|
||||||
|
|
||||||
|
|
||||||
def start_nix_collect_garbage() -> Job:
    """Register the garbage-collection job and schedule it on the worker."""
    gc_job = Jobs.add(
        type_id="maintenance.collect_nix_garbage",
        name="Collect garbage",
        description="Cleaning up unused packages",
    )
    calculate_and_clear_dead_paths(job=gc_job)
    return gc_job
|
|
|
@ -1,57 +0,0 @@
|
||||||
import time
|
|
||||||
from selfprivacy_api.utils.huey import huey
|
|
||||||
from selfprivacy_api.jobs import JobStatus, Jobs
|
|
||||||
|
|
||||||
|
|
||||||
@huey.task()
def test_job():
    """Demo task: walks a fake job through its whole lifecycle.

    Emits the same sequence of updates as before: five "pre-move checks"
    steps at 5% increments, each preceded by a 5-second sleep, then a
    final FINISHED update at 100%.
    """
    job = Jobs.add(
        type_id="test",
        name="Test job",
        description="This is a test job.",
        status=JobStatus.CREATED,
        status_text="",
        progress=0,
    )
    for step_progress in (5, 10, 15, 20, 25):
        time.sleep(5)
        Jobs.update(
            job=job,
            status=JobStatus.RUNNING,
            status_text="Performing pre-move checks...",
            progress=step_progress,
        )
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.FINISHED,
        status_text="Job finished.",
        progress=100,
    )
|
|
|
@ -1,136 +0,0 @@
|
||||||
"""
|
|
||||||
A task to start the system upgrade or rebuild by starting a systemd unit.
|
|
||||||
After starting, track the status of the systemd unit and update the Job
|
|
||||||
status accordingly.
|
|
||||||
"""
|
|
||||||
import subprocess
|
|
||||||
from selfprivacy_api.utils.huey import huey
|
|
||||||
from selfprivacy_api.jobs import JobStatus, Jobs, Job
|
|
||||||
from selfprivacy_api.utils.waitloop import wait_until_true
|
|
||||||
from selfprivacy_api.utils.systemd import (
|
|
||||||
get_service_status,
|
|
||||||
get_last_log_lines,
|
|
||||||
ServiceStatus,
|
|
||||||
)
|
|
||||||
|
|
||||||
START_TIMEOUT = 60 * 5
|
|
||||||
START_INTERVAL = 1
|
|
||||||
RUN_TIMEOUT = 60 * 60
|
|
||||||
RUN_INTERVAL = 5
|
|
||||||
|
|
||||||
|
|
||||||
def check_if_started(unit_name: str):
    """Return True once the systemd unit reports ACTIVE, False otherwise."""
    try:
        return get_service_status(unit_name) == ServiceStatus.ACTIVE
    except subprocess.CalledProcessError:
        # systemctl exits non-zero while the unit is not queryable yet.
        return False
|
|
||||||
|
|
||||||
|
|
||||||
def check_running_status(job: Job, unit_name: str):
    """Poll the rebuild unit once and mirror its state into the job.

    Returns True when the unit reached a terminal state (clean exit or
    failure), False while it is still running or its status cannot be
    read.
    """
    try:
        unit_state = get_service_status(unit_name)
        if unit_state == ServiceStatus.INACTIVE:
            # Clean exit: the rebuild completed successfully.
            Jobs.update(
                job=job,
                status=JobStatus.FINISHED,
                result="System rebuilt.",
                progress=100,
            )
            return True
        if unit_state == ServiceStatus.FAILED:
            failure_tail = get_last_log_lines(unit_name, 10)
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error="System rebuild failed. Last log lines:\n" + "\n".join(failure_tail),
            )
            return True
        if unit_state == ServiceStatus.ACTIVE:
            # Surface the most recent journal line as live status text.
            latest = get_last_log_lines(unit_name, 1)
            Jobs.update(
                job=job,
                status=JobStatus.RUNNING,
                status_text=latest[0] if len(latest) > 0 else "",
            )
            return False
        return False
    except subprocess.CalledProcessError:
        return False
|
|
||||||
|
|
||||||
|
|
||||||
def rebuild_system(job: Job, upgrade: bool = False):
    """
    Start the sp-nixos rebuild/upgrade systemd unit and track it to
    completion, mirroring its state into the given job.

    Broken out to allow calling it synchronously.
    We cannot just block until task is done because it will require a second worker
    Which we do not have
    """
    unit_name = "sp-nixos-upgrade.service" if upgrade else "sp-nixos-rebuild.service"
    try:
        subprocess.run(
            ["systemctl", "start", unit_name],
            check=True,
            start_new_session=True,
            shell=False,
        )
        Jobs.update(
            job=job,
            status=JobStatus.RUNNING,
            status_text="Starting the system rebuild...",
        )
        # Wait for the systemd unit to start
        if not _wait_or_fail_job(
            job,
            unit_name,
            lambda: check_if_started(unit_name),
            START_TIMEOUT,
            START_INTERVAL,
        ):
            return
        Jobs.update(
            job=job,
            status=JobStatus.RUNNING,
            status_text="Rebuilding the system...",
        )
        # Wait for the systemd unit to finish
        if not _wait_or_fail_job(
            job,
            unit_name,
            lambda: check_running_status(job, unit_name),
            RUN_TIMEOUT,
            RUN_INTERVAL,
        ):
            return
    except subprocess.CalledProcessError as e:
        # systemctl start itself failed.
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
            status_text=str(e),
        )


def _wait_or_fail_job(job: Job, unit_name: str, condition, timeout_sec, interval) -> bool:
    """Wait until condition() is true; on timeout record the failure.

    On TimeoutError, marks the job errored with the unit's last ten log
    lines and returns False; returns True when the condition was met.
    Extracted because the start-wait and run-wait paths previously
    duplicated this error handling verbatim.
    """
    try:
        wait_until_true(
            condition,
            timeout_sec=timeout_sec,
            interval=interval,
        )
        return True
    except TimeoutError:
        log_lines = get_last_log_lines(unit_name, 10)
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
            error="System rebuild timed out. Last log lines:\n" + "\n".join(log_lines),
        )
        return False
|
|
||||||
|
|
||||||
|
|
||||||
@huey.task()
def rebuild_system_task(job: Job, upgrade: bool = False):
    """Rebuild the system"""
    # Thin huey wrapper: the actual work lives in rebuild_system() so it
    # can also be invoked synchronously (there is only one worker).
    rebuild_system(job, upgrade)
|
|
|
@ -1,48 +0,0 @@
|
||||||
"""Migrations module.
|
|
||||||
Migrations module is introduced in v1.1.1 and provides one-shot
|
|
||||||
migrations which cannot be performed from the NixOS configuration file changes.
|
|
||||||
These migrations are checked and ran before every start of the API.
|
|
||||||
|
|
||||||
You can disable certain migrations if needed by creating an array
|
|
||||||
at api.skippedMigrations in userdata.json and populating it
|
|
||||||
with IDs of the migrations to skip.
|
|
||||||
Adding DISABLE_ALL to that array disables the migrations module entirely.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from selfprivacy_api.utils import ReadUserData, UserDataFiles
|
|
||||||
from selfprivacy_api.migrations.write_token_to_redis import WriteTokenToRedis
|
|
||||||
from selfprivacy_api.migrations.check_for_system_rebuild_jobs import (
|
|
||||||
CheckForSystemRebuildJobs,
|
|
||||||
)
|
|
||||||
|
|
||||||
migrations = [
|
|
||||||
WriteTokenToRedis(),
|
|
||||||
CheckForSystemRebuildJobs(),
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def run_migrations():
    """
    Go over all migrations. If they are not skipped in userdata file, run them
    if the migration needed.
    """
    with ReadUserData(UserDataFiles.SECRETS) as data:
        if "api" in data and "skippedMigrations" in data["api"]:
            to_skip = data["api"].get("skippedMigrations", [])
        else:
            to_skip = []

    if "DISABLE_ALL" in to_skip:
        return

    for migration in migrations:
        if migration.get_migration_name() in to_skip:
            continue
        # One failing migration must not block the remaining ones.
        try:
            if migration.is_migration_needed():
                migration.migrate()
        except Exception as err:
            print(f"Error while migrating {migration.get_migration_name()}")
            print(err)
            print("Skipping this migration")
|
|
|
@ -1,47 +0,0 @@
|
||||||
from selfprivacy_api.migrations.migration import Migration
|
|
||||||
from selfprivacy_api.jobs import JobStatus, Jobs
|
|
||||||
|
|
||||||
|
|
||||||
class CheckForSystemRebuildJobs(Migration):
    """Check if there are unfinished system rebuild jobs and finish them"""

    # Job types and statuses identifying a rebuild left hanging by an
    # API restart. Shared by the check and the fix, which previously
    # duplicated this predicate verbatim.
    _REBUILD_TYPE_IDS = [
        "system.nixos.rebuild",
        "system.nixos.upgrade",
    ]
    _UNFINISHED_STATUSES = [
        JobStatus.CREATED,
        JobStatus.RUNNING,
    ]

    def get_migration_name(self):
        return "check_for_system_rebuild_jobs"

    def get_migration_description(self):
        return "Check if there are unfinished system rebuild jobs and finish them"

    @classmethod
    def _is_stale_rebuild_job(cls, job) -> bool:
        """True for a rebuild/upgrade job still marked as in progress."""
        return (
            job.type_id in cls._REBUILD_TYPE_IDS
            and job.status in cls._UNFINISHED_STATUSES
        )

    def is_migration_needed(self):
        # Check if there are any unfinished system rebuild jobs
        for job in Jobs.get_jobs():
            if self._is_stale_rebuild_job(job):
                return True
        # Previously fell off the end returning None; an explicit False
        # is clearer and equally falsy for callers.
        return False

    def migrate(self):
        # As the API is restarted, we assume that the jobs are finished
        for job in Jobs.get_jobs():
            if self._is_stale_rebuild_job(job):
                Jobs.update(
                    job=job,
                    status=JobStatus.FINISHED,
                    result="System rebuilt.",
                    progress=100,
                )
|
|
|
@ -1,28 +0,0 @@
|
||||||
from abc import ABC, abstractmethod
|
|
||||||
|
|
||||||
|
|
||||||
class Migration(ABC):
    """
    Abstract Migration class
    This class is used to define the structure of a migration
    Migration has a function is_migration_needed() that returns True or False
    Migration has a function migrate() that does the migration
    Migration has a function get_migration_name() that returns the migration name
    Migration has a function get_migration_description() that returns the migration description
    """

    @abstractmethod
    def get_migration_name(self):
        """Return the identifier used to skip this migration via
        api.skippedMigrations in the userdata file."""
        pass

    @abstractmethod
    def get_migration_description(self):
        """Return a human-readable description of the migration."""
        pass

    @abstractmethod
    def is_migration_needed(self):
        """Return True if migrate() should be run on this system."""
        pass

    @abstractmethod
    def migrate(self):
        """Perform the one-shot migration."""
        pass
|
|
|
@ -1,63 +0,0 @@
|
||||||
from datetime import datetime
|
|
||||||
from typing import Optional
|
|
||||||
from selfprivacy_api.migrations.migration import Migration
|
|
||||||
from selfprivacy_api.models.tokens.token import Token
|
|
||||||
|
|
||||||
from selfprivacy_api.repositories.tokens.redis_tokens_repository import (
|
|
||||||
RedisTokensRepository,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
|
|
||||||
AbstractTokensRepository,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.utils import ReadUserData, UserDataFiles
|
|
||||||
|
|
||||||
|
|
||||||
class WriteTokenToRedis(Migration):
    """Load Json tokens into Redis"""

    def get_migration_name(self):
        return "write_token_to_redis"

    def get_migration_description(self):
        return "Loads the initial token into redis token storage"

    def is_repo_empty(self, repo: AbstractTokensRepository) -> bool:
        """True when the repository holds no tokens at all."""
        return repo.get_tokens() == []

    def get_token_from_json(self) -> Optional[Token]:
        """Build a Token from the secrets file; None on any failure."""
        try:
            with ReadUserData(UserDataFiles.SECRETS) as userdata:
                return Token(
                    token=userdata["api"]["token"],
                    device_name="Initial device",
                    created_at=datetime.now(),
                )
        except Exception as e:
            print(e)
            return None

    def is_migration_needed(self):
        # Needed only when a JSON token exists and Redis has none yet.
        try:
            json_token = self.get_token_from_json()
            if json_token is not None and self.is_repo_empty(RedisTokensRepository()):
                return True
        except Exception as e:
            print(e)
        return False

    def migrate(self):
        # Write info about providers to userdata.json
        try:
            token = self.get_token_from_json()
            if token is None:
                print("No token found in secrets.json")
                return
            RedisTokensRepository()._store_token(token)
            print("Done")
        except Exception as e:
            print(e)
            print("Error migrating access tokens from json to redis")
|
|
|
@ -1,11 +0,0 @@
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
"""for storage in Redis"""
|
|
||||||
|
|
||||||
|
|
||||||
class BackupProviderModel(BaseModel):
    """Serializable description of a backup provider account
    (stored in Redis, per the module docstring)."""

    # Which backup backend this is — presumably a provider discriminator;
    # confirm the accepted values against callers.
    kind: str
    login: str
    key: str
    location: str
    repo_id: str  # for app usage, not for us
|
|
|
@ -1,11 +0,0 @@
|
||||||
import datetime
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
from selfprivacy_api.graphql.common_types.backup import BackupReason
|
|
||||||
|
|
||||||
|
|
||||||
class Snapshot(BaseModel):
    """A single backup snapshot of one service."""

    id: str
    service_name: str
    created_at: datetime.datetime
    # Why the snapshot was taken; defaults to an explicit user request.
    reason: BackupReason = BackupReason.EXPLICIT
|
|
|
@ -1,24 +0,0 @@
|
||||||
from enum import Enum
|
|
||||||
from typing import Optional
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceStatus(Enum):
    """Enum for service status"""

    # NOTE(review): the value names appear to mirror systemd unit
    # activity states, plus OFF for a disabled service — confirm against
    # the code that produces these values.
    ACTIVE = "ACTIVE"
    RELOADING = "RELOADING"
    INACTIVE = "INACTIVE"
    FAILED = "FAILED"
    ACTIVATING = "ACTIVATING"
    DEACTIVATING = "DEACTIVATING"
    OFF = "OFF"
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceDnsRecord(BaseModel):
    """A DNS record that a service needs published."""

    type: str  # record type, presumably "A"/"AAAA"/"CNAME"/... — confirm
    name: str
    content: str
    ttl: int
    display_name: str
    # Only meaningful for record types that carry one (e.g. MX/SRV).
    priority: Optional[int] = None
|
|
|
@ -1,48 +0,0 @@
|
||||||
"""
|
|
||||||
New device key used to obtain access token.
|
|
||||||
"""
|
|
||||||
from datetime import datetime, timedelta, timezone
|
|
||||||
import secrets
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from mnemonic import Mnemonic
|
|
||||||
|
|
||||||
from selfprivacy_api.models.tokens.time import is_past
|
|
||||||
|
|
||||||
|
|
||||||
class NewDeviceKey(BaseModel):
    """
    Recovery key used to obtain access token.

    Recovery key has a key string, date of creation, date of expiration.
    """

    key: str
    created_at: datetime
    expires_at: datetime

    def is_valid(self) -> bool:
        """
        Check if key is valid.
        """
        return not is_past(self.expires_at)

    def as_mnemonic(self) -> str:
        """
        Get the key as a mnemonic.
        """
        raw_key = bytes.fromhex(self.key)
        return Mnemonic(language="english").to_mnemonic(raw_key)

    @staticmethod
    def generate() -> "NewDeviceKey":
        """
        Factory to generate a random token.
        """
        now = datetime.now(timezone.utc)
        return NewDeviceKey(
            key=secrets.token_bytes(16).hex(),
            created_at=now,
            # New-device keys are short-lived by design.
            expires_at=now + timedelta(minutes=10),
        )
|
|
|
@ -1,61 +0,0 @@
|
||||||
"""
|
|
||||||
Recovery key used to obtain access token.
|
|
||||||
|
|
||||||
Recovery key has a token string, date of creation, optional date of expiration and optional count of uses left.
|
|
||||||
"""
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
import secrets
|
|
||||||
from typing import Optional
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from mnemonic import Mnemonic
|
|
||||||
|
|
||||||
from selfprivacy_api.models.tokens.time import is_past, ensure_timezone
|
|
||||||
|
|
||||||
|
|
||||||
class RecoveryKey(BaseModel):
    """
    Recovery key used to obtain access token.

    Recovery key has a key string, date of creation, optional date of expiration and optional count of uses left.
    """

    key: str
    created_at: datetime
    expires_at: Optional[datetime]
    uses_left: Optional[int]

    def is_valid(self) -> bool:
        """
        Check if the recovery key is valid.
        """
        expired = self.expires_at is not None and is_past(self.expires_at)
        exhausted = self.uses_left is not None and self.uses_left <= 0
        return not (expired or exhausted)

    def as_mnemonic(self) -> str:
        """
        Get the recovery key as a mnemonic.
        """
        return Mnemonic(language="english").to_mnemonic(bytes.fromhex(self.key))

    @staticmethod
    def generate(
        expiration: Optional[datetime],
        uses_left: Optional[int],
    ) -> "RecoveryKey":
        """
        Factory to generate a random token.
        If passed naive time as expiration, assumes utc
        """
        now = datetime.now(timezone.utc)
        normalized_expiration = (
            ensure_timezone(expiration) if expiration is not None else None
        )
        return RecoveryKey(
            key=secrets.token_bytes(24).hex(),
            created_at=now,
            expires_at=normalized_expiration,
            uses_left=uses_left,
        )
|
|
|
@ -1,14 +0,0 @@
|
||||||
from datetime import datetime, timezone
|
|
||||||
|
|
||||||
|
|
||||||
def is_past(dt: datetime) -> bool:
    """Return True if dt is earlier than the current UTC time.

    Naive datetimes are interpreted as UTC (the ensure_timezone rule,
    inlined here) so the comparison with an aware now() never raises.
    """
    if dt.tzinfo is None or dt.tzinfo.utcoffset(None) is None:
        dt = dt.replace(tzinfo=timezone.utc)
    return dt < datetime.now(timezone.utc)
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_timezone(dt: datetime) -> datetime:
    """Return dt unchanged if it is tz-aware, otherwise stamped as UTC."""
    is_naive = dt.tzinfo is None or dt.tzinfo.utcoffset(None) is None
    return dt.replace(tzinfo=timezone.utc) if is_naive else dt
|
|
|
@ -1,33 +0,0 @@
|
||||||
"""
|
|
||||||
Model of the access token.
|
|
||||||
|
|
||||||
Access token has a token string, device name and date of creation.
|
|
||||||
"""
|
|
||||||
from datetime import datetime
|
|
||||||
import secrets
|
|
||||||
from pydantic import BaseModel
|
|
||||||
|
|
||||||
|
|
||||||
class Token(BaseModel):
    """
    Model of the access token.

    Access token has a token string, device name and date of creation.
    """

    token: str
    device_name: str
    created_at: datetime

    @staticmethod
    def generate(device_name: str) -> "Token":
        """
        Factory to generate a random token.
        """
        # NOTE(review): datetime.now() is naive here, unlike the
        # timezone-aware timestamps in the key models — confirm that
        # created_at is never compared against aware datetimes.
        creation_date = datetime.now()
        # token_urlsafe(32) yields a 256-bit URL-safe secret.
        token = secrets.token_urlsafe(32)
        return Token(
            token=token,
            device_name=device_name,
            created_at=creation_date,
        )
|
|
|
@ -1,225 +0,0 @@
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from abc import ABC, abstractmethod
|
|
||||||
from datetime import datetime
|
|
||||||
from typing import Optional
|
|
||||||
from mnemonic import Mnemonic
|
|
||||||
from secrets import randbelow
|
|
||||||
import re
|
|
||||||
|
|
||||||
from selfprivacy_api.models.tokens.token import Token
|
|
||||||
from selfprivacy_api.repositories.tokens.exceptions import (
|
|
||||||
TokenNotFound,
|
|
||||||
InvalidMnemonic,
|
|
||||||
RecoveryKeyNotFound,
|
|
||||||
NewDeviceKeyNotFound,
|
|
||||||
)
|
|
||||||
from selfprivacy_api.models.tokens.recovery_key import RecoveryKey
|
|
||||||
from selfprivacy_api.models.tokens.new_device_key import NewDeviceKey
|
|
||||||
|
|
||||||
|
|
||||||
class AbstractTokensRepository(ABC):
|
|
||||||
def get_token_by_token_string(self, token_string: str) -> Token:
|
|
||||||
"""Get the token by token"""
|
|
||||||
tokens = self.get_tokens()
|
|
||||||
for token in tokens:
|
|
||||||
if token.token == token_string:
|
|
||||||
return token
|
|
||||||
|
|
||||||
raise TokenNotFound("Token not found!")
|
|
||||||
|
|
||||||
def get_token_by_name(self, token_name: str) -> Token:
|
|
||||||
"""Get the token by name"""
|
|
||||||
tokens = self.get_tokens()
|
|
||||||
for token in tokens:
|
|
||||||
if token.device_name == token_name:
|
|
||||||
return token
|
|
||||||
|
|
||||||
raise TokenNotFound("Token not found!")
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_tokens(self) -> list[Token]:
|
|
||||||
"""Get the tokens"""
|
|
||||||
|
|
||||||
def create_token(self, device_name: str) -> Token:
|
|
||||||
"""Create new token"""
|
|
||||||
unique_name = self._make_unique_device_name(device_name)
|
|
||||||
new_token = Token.generate(unique_name)
|
|
||||||
|
|
||||||
self._store_token(new_token)
|
|
||||||
|
|
||||||
return new_token
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def delete_token(self, input_token: Token) -> None:
|
|
||||||
"""Delete the token"""
|
|
||||||
|
|
||||||
def refresh_token(self, input_token: Token) -> Token:
|
|
||||||
"""Change the token field of the existing token"""
|
|
||||||
new_token = Token.generate(device_name=input_token.device_name)
|
|
||||||
new_token.created_at = input_token.created_at
|
|
||||||
|
|
||||||
if input_token in self.get_tokens():
|
|
||||||
self.delete_token(input_token)
|
|
||||||
self._store_token(new_token)
|
|
||||||
return new_token
|
|
||||||
|
|
||||||
raise TokenNotFound("Token not found!")
|
|
||||||
|
|
||||||
def is_token_valid(self, token_string: str) -> bool:
|
|
||||||
"""Check if the token is valid"""
|
|
||||||
return token_string in [token.token for token in self.get_tokens()]
|
|
||||||
|
|
||||||
def is_token_name_exists(self, token_name: str) -> bool:
|
|
||||||
"""Check if the token name exists"""
|
|
||||||
return token_name in [token.device_name for token in self.get_tokens()]
|
|
||||||
|
|
||||||
def is_token_name_pair_valid(self, token_name: str, token_string: str) -> bool:
|
|
||||||
"""Check if the token name and token are valid"""
|
|
||||||
try:
|
|
||||||
token = self.get_token_by_name(token_name)
|
|
||||||
if token is None:
|
|
||||||
return False
|
|
||||||
except TokenNotFound:
|
|
||||||
return False
|
|
||||||
return token.token == token_string
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def get_recovery_key(self) -> Optional[RecoveryKey]:
|
|
||||||
"""Get the recovery key"""
|
|
||||||
|
|
||||||
def create_recovery_key(
|
|
||||||
self,
|
|
||||||
expiration: Optional[datetime],
|
|
||||||
uses_left: Optional[int],
|
|
||||||
) -> RecoveryKey:
|
|
||||||
"""Create the recovery key"""
|
|
||||||
recovery_key = RecoveryKey.generate(expiration, uses_left)
|
|
||||||
self._store_recovery_key(recovery_key)
|
|
||||||
return recovery_key
|
|
||||||
|
|
||||||
def use_mnemonic_recovery_key(
|
|
||||||
self, mnemonic_phrase: str, device_name: str
|
|
||||||
) -> Token:
|
|
||||||
"""Use the mnemonic recovery key and create a new token with the given name"""
|
|
||||||
if not self.is_recovery_key_valid():
|
|
||||||
raise RecoveryKeyNotFound("Recovery key not found")
|
|
||||||
|
|
||||||
recovery_key = self.get_recovery_key()
|
|
||||||
|
|
||||||
if recovery_key is None:
|
|
||||||
raise RecoveryKeyNotFound("Recovery key not found")
|
|
||||||
|
|
||||||
recovery_hex_key = recovery_key.key
|
|
||||||
if not self._assert_mnemonic(recovery_hex_key, mnemonic_phrase):
|
|
||||||
raise RecoveryKeyNotFound("Recovery key not found")
|
|
||||||
|
|
||||||
new_token = self.create_token(device_name=device_name)
|
|
||||||
|
|
||||||
self._decrement_recovery_token()
|
|
||||||
|
|
||||||
return new_token
|
|
||||||
|
|
||||||
def is_recovery_key_valid(self) -> bool:
|
|
||||||
"""Check if the recovery key is valid"""
|
|
||||||
recovery_key = self.get_recovery_key()
|
|
||||||
if recovery_key is None:
|
|
||||||
return False
|
|
||||||
return recovery_key.is_valid()
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def _store_recovery_key(self, recovery_key: RecoveryKey) -> None:
|
|
||||||
"""Store recovery key directly"""
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def _delete_recovery_key(self) -> None:
|
|
||||||
"""Delete the recovery key"""
|
|
||||||
|
|
||||||
def get_new_device_key(self) -> NewDeviceKey:
|
|
||||||
"""Creates and returns the new device key"""
|
|
||||||
new_device_key = NewDeviceKey.generate()
|
|
||||||
self._store_new_device_key(new_device_key)
|
|
||||||
|
|
||||||
return new_device_key
|
|
||||||
|
|
||||||
def _store_new_device_key(self, new_device_key: NewDeviceKey) -> None:
|
|
||||||
"""Store new device key directly"""
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def delete_new_device_key(self) -> None:
|
|
||||||
"""Delete the new device key"""
|
|
||||||
|
|
||||||
def use_mnemonic_new_device_key(
|
|
||||||
self, mnemonic_phrase: str, device_name: str
|
|
||||||
) -> Token:
|
|
||||||
"""Use the mnemonic new device key"""
|
|
||||||
new_device_key = self._get_stored_new_device_key()
|
|
||||||
if not new_device_key:
|
|
||||||
raise NewDeviceKeyNotFound
|
|
||||||
|
|
||||||
if not new_device_key.is_valid():
|
|
||||||
raise NewDeviceKeyNotFound
|
|
||||||
|
|
||||||
if not self._assert_mnemonic(new_device_key.key, mnemonic_phrase):
|
|
||||||
raise NewDeviceKeyNotFound("Phrase is not token!")
|
|
||||||
|
|
||||||
new_token = self.create_token(device_name=device_name)
|
|
||||||
self.delete_new_device_key()
|
|
||||||
|
|
||||||
return new_token
|
|
||||||
|
|
||||||
def reset(self):
|
|
||||||
for token in self.get_tokens():
|
|
||||||
self.delete_token(token)
|
|
||||||
self.delete_new_device_key()
|
|
||||||
self._delete_recovery_key()
|
|
||||||
|
|
||||||
def clone(self, source: AbstractTokensRepository) -> None:
|
|
||||||
"""Clone the state of another repository to this one"""
|
|
||||||
self.reset()
|
|
||||||
for token in source.get_tokens():
|
|
||||||
self._store_token(token)
|
|
||||||
|
|
||||||
recovery_key = source.get_recovery_key()
|
|
||||||
if recovery_key is not None:
|
|
||||||
self._store_recovery_key(recovery_key)
|
|
||||||
|
|
||||||
new_device_key = source._get_stored_new_device_key()
|
|
||||||
if new_device_key is not None:
|
|
||||||
self._store_new_device_key(new_device_key)
|
|
||||||
|
|
||||||
@abstractmethod
def _store_token(self, new_token: Token):
    """Store a token directly.

    Raw backend write (no validation or deduplication); used by clone().
    """
|
|
||||||
|
|
||||||
@abstractmethod
def _decrement_recovery_token(self):
    """Decrement recovery key use count by one.

    Backend hook for use-limited recovery keys.
    """
|
|
||||||
|
|
||||||
@abstractmethod
def _get_stored_new_device_key(self) -> Optional[NewDeviceKey]:
    """Retrieves new device key that is already stored.

    Returns None when no new-device key is currently stored.
    """
|
|
||||||
|
|
||||||
def _make_unique_device_name(self, name: str) -> str:
    """Token name must be an alphanumeric string and not empty.

    Replace invalid characters with '_'.
    If name exists, add a random number to the end of the name until it
    is unique.
    """
    # re.sub is a no-op on an already-alphanumeric name, so the previous
    # `re.match("^[a-zA-Z0-9]*$", ...)` pre-check was redundant; regex is
    # now a raw string per convention.
    name = re.sub(r"[^a-zA-Z0-9]", "_", name)
    if name == "":
        # NOTE(review): this fallback contains a space, contradicting the
        # documented alphanumeric invariant; kept byte-identical for
        # backward compatibility — confirm before changing.
        name = "Unknown device"
    # Append cryptographically random digits until the name is free.
    while self.is_token_name_exists(name):
        name += str(randbelow(10))
    return name
|
|
||||||
|
|
||||||
# TODO: find a proper place for it
|
|
||||||
def _assert_mnemonic(self, hex_key: str, mnemonic_phrase: str):
    """Return true if hex string matches the phrase, false otherwise.

    Raise an InvalidMnemonic error if not mnemonic.
    """
    # Decode first so a malformed hex key raises ValueError before any
    # phrase validation, exactly as before.
    key_bytes = bytes.fromhex(hex_key)
    codec = Mnemonic(language="english")
    if not codec.check(mnemonic_phrase):
        raise InvalidMnemonic("Phrase is not mnemonic!")
    return codec.to_entropy(mnemonic_phrase) == key_bytes
|
|
|
@ -1,14 +0,0 @@
|
||||||
class TokenNotFound(Exception):
    """Token not found!

    Raised when a requested token does not exist in the repository.
    """
|
|
||||||
|
|
||||||
|
|
||||||
class RecoveryKeyNotFound(Exception):
    """Recovery key not found!

    Raised when no usable recovery key is stored in the repository.
    """
|
|
||||||
|
|
||||||
|
|
||||||
class InvalidMnemonic(Exception):
    """Phrase is not mnemonic!

    Raised when a supplied phrase fails BIP-39 mnemonic validation.
    """
|
|
||||||
|
|
||||||
|
|
||||||
class NewDeviceKeyNotFound(Exception):
    """New device key not found!

    Raised when the new-device key is missing, expired, or does not
    match the supplied mnemonic phrase.
    """
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue