Compare commits
1276 Commits
Author | SHA1 | Date |
---|---|---|
Inex Code | 524adaa8bc | |
houkime | 5e93e6499f | |
houkime | 3302fe2818 | |
Houkime | 9ee72c1fcb | |
Houkime | 28556bd22d | |
Houkime | c5b227226c | |
Inex Code | 5ec677339b | |
Houkime | f2446dcee2 | |
Houkime | 97960f77f2 | |
Houkime | 677ed27773 | |
Houkime | b40df670f8 | |
Houkime | b36701e31c | |
Houkime | b39558ea1f | |
Houkime | 6f38b2309f | |
Houkime | baf7843349 | |
Houkime | 8e48a5ad5f | |
Houkime | fde461b4b9 | |
Houkime | 9954737791 | |
Houkime | 2b19633cbd | |
Houkime | 83592b7bf4 | |
houkime | efc6b47cfe | |
Houkime | b2edfe784a | |
Houkime | 6e29da4a4f | |
Houkime | 12b2153b7c | |
Houkime | 8c8c9a51cc | |
Houkime | fed5735b24 | |
Houkime | b257d7f39e | |
Houkime | 70a0287794 | |
Houkime | 534d965cab | |
Houkime | f333e791e1 | |
houkime | 962e8d5ca7 | |
Alexander | 5e29816c84 | |
Alexander | 53ec774c90 | |
Inex Code | bda21b7507 | |
Inex Code | 2d5ac51c06 | |
Alexander | 61b9a00cea | |
houkime | edcc7860e4 | |
Houkime | 64da8503dd | |
houkime | d464f3b82d | |
Alexander | bddc6d1831 | |
Alexander | 5d01c25f3b | |
Alexander | 69774ba186 | |
Inex Code | 1f1fcc223b | |
Inex Code | a543f6da2a | |
Inex Code | cf2f153cfe | |
Inex Code | 0eff0ef735 | |
Houkime | 7dae81530e | |
Houkime | fd43a6ccf1 | |
Houkime | eeef2891c9 | |
Houkime | 3f9d2b2481 | |
Houkime | 305e5cc2c3 | |
Houkime | 1e51f51844 | |
Houkime | 235c59b556 | |
Houkime | ddca1b0cde | |
Houkime | c22802f693 | |
Houkime | 17a1e34c0d | |
Houkime | d7ef2ed09a | |
Houkime | 7fd09982a4 | |
Houkime | b054235d96 | |
Houkime | 2519a50aac | |
Houkime | d34db3d661 | |
Houkime | 28fdf8fb49 | |
def | 18327ffa85 | |
def | b5183948af | |
def | e01b8ed8f0 | |
def | 5cd1e28632 | |
Inex Code | f895f2a38b | |
Inex Code | 8a607b9609 | |
Inex Code | c733cfeb9e | |
Inex Code | 71433da424 | |
Houkime | ee7c41e0c2 | |
Houkime | 1bed9d87ca | |
Houkime | 2c1c783b5e | |
Houkime | 8402f66a33 | |
Houkime | 1599f601a2 | |
Houkime | 0068272382 | |
Houkime | 18934a53e6 | |
Houkime | baaf3299ce | |
Houkime | f059c83b57 | |
Houkime | fb41c092f1 | |
Houkime | c947922a5d | |
Houkime | b22dfc0469 | |
Houkime | b3c7e2fa9e | |
Houkime | 6cd1d27902 | |
Houkime | e42da357fb | |
Houkime | 2863dd9763 | |
def | 0309e6b76e | |
def | f4739d4539 | |
def | 20c089154d | |
def | e703206e9d | |
Inex Code | 96f8aad146 | |
Inex Code | 0e94590420 | |
Inex Code | 36d026a8ca | |
Inex Code | 8cb812be56 | |
Houkime | 7ccf495958 | |
Houkime | f840a6e204 | |
Houkime | f5d7666614 | |
Houkime | 76f5b57c86 | |
Houkime | bf33fff20d | |
Houkime | 742bb239e7 | |
Inex Code | e16f4499f8 | |
Inex Code | 5616dbe77a | |
Inex Code | bbec9d9d33 | |
Inex Code | a4327fa669 | |
Inex Code | 2443ae0144 | |
Inex Code | c63552241c | |
Inex Code | d8666fa179 | |
Inex Code | 3965203d28 | |
Inex Code | 88b3a1e368 | |
Houkime | 52f8e283be | |
Houkime | 4757bedc4e | |
Houkime | bc0602bfcb | |
Inex Code | 4263f6fc63 | |
Alexander | ad6514658d | |
Alexander | b4fe79fdce | |
Inex Code | 25c691104f | |
Inex Code | 1a34558e23 | |
Inex Code | c851c3d193 | |
Inex Code | ad069a2ad2 | |
Inex Code | b98c020f23 | |
Inex Code | 94456af7d4 | |
Inex Code | ab1ca6e59c | |
Inex Code | 00bcca0f99 | |
Inex Code | 56de00226a | |
Inex Code | 2019da1e10 | |
Inex Code | d0eee319d3 | |
Alexander Tomokhov | 1ec6be59fd | |
Inex Code | 591138c353 | |
houkime | 9e8326bbcf | |
Houkime | eaf29178fe | |
Inex Code | 23adf95898 | |
Houkime | e13ad71a93 | |
houkime | e3761a200c | |
Houkime | c38e066507 | |
Houkime | 7ec62a8f79 | |
Inex Code | 0ccb85d5cc | |
Inex Code | e588bef597 | |
Inex Code | a0eb5d572f | |
Inex Code | b6f436d8b3 | |
Inex Code | 1e9744227b | |
Inex Code | 6b4920a0e7 | |
Houkime | 2b21df9ad3 | |
Houkime | 8e21e6d378 | |
Houkime | 8e551a8fe0 | |
Houkime | b8d02231cf | |
Houkime | 3080f5a18b | |
Houkime | 02b10b5078 | |
Houkime | 3e1fbdd4aa | |
Houkime | 41cd876f57 | |
Houkime | 43d9d47aed | |
Houkime | d96739c9ae | |
Houkime | 46cc3171ab | |
Houkime | 4a580e9b7b | |
Houkime | dcf6dd9ac5 | |
Houkime | ab081f6fbc | |
Houkime | e7c89e3e3f | |
Houkime | 2e775dad90 | |
Houkime | c470ec45e8 | |
Houkime | 2669e17c91 | |
Houkime | e835173fea | |
Houkime | adcdbfb368 | |
Houkime | fb15ef9388 | |
Houkime | 6ade95bbf1 | |
Houkime | 4b2eda25f6 | |
Houkime | bcbe1ff50c | |
Houkime | 2f25329c43 | |
Houkime | 04e3ee821f | |
Houkime | 03feab76b0 | |
Houkime | 7377c6375a | |
Houkime | 0669dc117b | |
Houkime | bc45a48af3 | |
Houkime | c5bb18215b | |
Houkime | b120858fa1 | |
Houkime | ca4b3c972d | |
Houkime | 946413615b | |
Houkime | 42d96bcd6d | |
Houkime | 16c2598e9b | |
Houkime | 65c2023366 | |
Houkime | 7c4c5929df | |
Houkime | b1eec1e37b | |
Houkime | f24aba8abb | |
Houkime | ac41cc00ce | |
Houkime | e7075546c5 | |
Houkime | 8fc7796da0 | |
Houkime | cf2935938d | |
Houkime | 7f1fcd66e3 | |
Houkime | 641959a083 | |
Houkime | 4b51f42e1b | |
Houkime | e1db00e509 | |
Houkime | 90c0c34a8d | |
Houkime | ee854aad1a | |
Houkime | a2065b87b7 | |
Houkime | 25d2537208 | |
Houkime | a5ab0df161 | |
Houkime | 0b90e3d20f | |
Houkime | 60c7e9a7e2 | |
Houkime | 9822d42dac | |
Houkime | 6c0d4ab42a | |
Houkime | ed4f6bfe32 | |
Houkime | 5651dcd94e | |
Houkime | f35280b764 | |
Houkime | e11e73f872 | |
Houkime | b644208c29 | |
Houkime | 1bb24b5f93 | |
Houkime | 4e730f015a | |
Houkime | 7c382c4779 | |
Houkime | f179cff0b4 | |
Houkime | 66561308bf | |
Houkime | 1b520a8093 | |
Houkime | 980d3622e8 | |
Houkime | 125d221442 | |
Houkime | 80e00740fb | |
Houkime | 113f512565 | |
Houkime | 15eafbb524 | |
Houkime | e63acc6d56 | |
Houkime | 9f04729296 | |
Houkime | 7038d69069 | |
Houkime | 5214d5e462 | |
Houkime | 2987065231 | |
Houkime | c1cc1e00ed | |
Houkime | 9d3fd45c2c | |
Houkime | bcf57ea738 | |
Houkime | bf0b774295 | |
Houkime | 834e8c0603 | |
Houkime | 22f157b6ff | |
Houkime | ffc60fc8b4 | |
Houkime | 5c1dd93931 | |
Houkime | 368ab22fbb | |
Houkime | f5999516fa | |
Houkime | 2e59e7e880 | |
Houkime | 1a65545c29 | |
Houkime | d7c75e0aa8 | |
Houkime | 0078ed0c3a | |
Houkime | c7be9c7427 | |
Houkime | 6f035dc0db | |
Houkime | 708c5cbc98 | |
Houkime | e1083f3221 | |
Houkime | 23cc33b9d9 | |
Houkime | 0b10c083af | |
Houkime | 267cdd391b | |
Houkime | aa287d9cf3 | |
Houkime | c83b1a3442 | |
Houkime | 9a1d82ec12 | |
Houkime | a12126f685 | |
Houkime | 9d7857cb3f | |
Houkime | 9a3800ac7b | |
Houkime | 1e77129f4f | |
Houkime | 018a8ce248 | |
Houkime | b06f1a4153 | |
Houkime | 83c639596c | |
Houkime | 6d244fb603 | |
Houkime | 34782a3ca8 | |
Houkime | bfdd98cb60 | |
Houkime | a163718120 | |
Houkime | b9f3aa49bd | |
Houkime | bd43bdb335 | |
Houkime | 47cfaad160 | |
Houkime | 92612906ef | |
Houkime | 728ea44823 | |
Houkime | 7808033bef | |
Houkime | 9bf239c3a8 | |
Houkime | 87248c3f8c | |
Houkime | 7ef751db98 | |
Houkime | 6e9d86e844 | |
Houkime | 69a05de3d7 | |
Houkime | 85c90105ea | |
Houkime | d4b2ca14bb | |
Houkime | 011e052962 | |
Houkime | d34b98e27b | |
Houkime | 7c8ea19608 | |
Houkime | cda8d70bd9 | |
Inex Code | cbd3cafe0a | |
Inex Code | 6d09c7aa9b | |
Inex Code | 860071e046 | |
Houkime | 993b58d52d | |
Houkime | 4339c00058 | |
Houkime | f1a452009a | |
Houkime | 5ac93c30ae | |
Houkime | 25378273eb | |
Houkime | 615e962965 | |
Houkime | 96bff873a9 | |
Inex Code | 113bcf4c29 | |
Inex Code | c3cec36ad4 | |
Houkime | e414f3b8fd | |
Houkime | 1bbb804919 | |
Houkime | dd6f37a17d | |
Houkime | 8badb9aaaf | |
Houkime | 8453f62c74 | |
Houkime | e78bcca9f2 | |
Houkime | 4d893d56b2 | |
Houkime | 73a847f288 | |
Houkime | b545a400c3 | |
Houkime | 8caf7e1b24 | |
Inex Code | bc98e41be8 | |
Houkime | a66ee2d3e5 | |
Houkime | 3deaeb28c5 | |
Houkime | b7cd703eaa | |
Inex Code | 829aca14be | |
Inex Code | badd619dd0 | |
Houkime | ece3258c78 | |
Houkime | 26c0a8fafe | |
Houkime | 0912ac1831 | |
Inex Code | 07aaa21602 | |
Inex Code | 2a03d3962f | |
Inex Code | cebb71ff4a | |
Inex Code | 62d5de0dd6 | |
Inex Code | 86f9451b9b | |
Inex Code | 8b840d4c2c | |
Inex Code | 6b106cbcf3 | |
Inex Code | 39baa3725b | |
Inex Code | 450a998ea6 | |
Inex Code | dedd6a9cc9 | |
Houkime | 56be3d9c31 | |
houkime | dd8fa4cf20 | |
Houkime | 0dfb41a689 | |
Inex Code | ad9384c850 | |
Houkime | 9fdc536f9f | |
Houkime | 0c04975ea4 | |
Houkime | 1fc47b049d | |
Houkime | a75a102df6 | |
Houkime | 9207f5385c | |
Inex Code | 5fd4daa3e7 | |
Inex Code | 9db717c774 | |
Inex Code | ada89a2494 | |
Houkime | 1333aad57d | |
Houkime | c68239044f | |
Houkime | 02b03cf401 | |
Houkime | de52dffdda | |
Houkime | 72535f8655 | |
Houkime | 0a852d8b50 | |
Inex Code | b6c3607d31 | |
Inex Code | f2c972ed5f | |
Inex Code | d6cf2abdc2 | |
Houkime | b2c7e8b73a | |
Houkime | 30b62c351a | |
Houkime | 1b9761293c | |
Houkime | 027a37bb47 | |
Houkime | d621ca6449 | |
Houkime | c89f9cf89d | |
Houkime | 69f6e62877 | |
Inex Code | 36e915907f | |
Houkime | 2c9011cc87 | |
Houkime | 0eb70e1551 | |
Houkime | 26ab7b4d7b | |
Houkime | eca4b26a31 | |
Houkime | 752a0b807e | |
Inex Code | 52336b885d | |
Inex Code | ff70a3588e | |
Inex Code | e6189e4e73 | |
Inex Code | a1267946fc | |
Inex Code | f4263b0288 | |
Inex Code | f08eafc3d8 | |
Inex Code | a87889b252 | |
Houkime | 2934e2beca | |
Houkime | bba837530a | |
Inex Code | 88af27a8ba | |
Inex Code | 641ab26069 | |
Inex Code | 829915029d | |
Inex Code | 6660e1d9d5 | |
Inex Code | 3bff43a6bf | |
Houkime | 00317cc7e4 | |
Houkime | cfa7f4ae59 | |
Houkime | ffec344ba8 | |
Houkime | aa7cc71557 | |
Houkime | e9bb6d9973 | |
houkime | c5b7ca7565 | |
Houkime | d664fcbdc4 | |
houkime | e8c59f5068 | |
Houkime | 26488aa12f | |
Inex Code | b01247bc55 | |
Inex Code | 413911849d | |
Inex Code | eafc7a412c | |
Inex Code | c9d20e8efd | |
Inex Code | e0a5ecdd9d | |
Inex Code | 95dbd4fac2 | |
Inex Code | f965546ca6 | |
Inex Code | ac236569b0 | |
Inex Code | 4b575b6138 | |
Inex Code | 0245d629fd | |
Inex Code | 2df448a4a9 | |
Houkime | f4ac3d29a9 | |
Houkime | 466160dbf9 | |
Inex Code | 5bee124500 | |
Inex Code | a757dc7cc1 | |
Inex Code | 5253780cc8 | |
Inex Code | 3067d353d8 | |
Houkime | fa53264136 | |
Houkime | a303e5ce37 | |
Inex Code | 8b504993d0 | |
Inex Code | 20f3e5c564 | |
Inex Code | 7fe802eb1d | |
Inex Code | 8805f73812 | |
Houkime | 45011450c5 | |
Houkime | f711275a5e | |
Houkime | 097cf50b37 | |
Houkime | c53f35c947 | |
Houkime | b001e198bf | |
Houkime | 40ad1b5ce4 | |
Houkime | a7427f3cb5 | |
Houkime | 86c2ae2c1f | |
Houkime | ea4e53f826 | |
Houkime | e2b906b219 | |
Houkime | d33e9d6335 | |
Houkime | 8e29634d02 | |
Houkime | be95b84d52 | |
Houkime | cacbf8335d | |
Houkime | 65ce86f0f9 | |
Houkime | 95e4296d0b | |
Houkime | 59fe386463 | |
Houkime | 02e3c9bd5e | |
Houkime | f361f44ded | |
Houkime | 4423db7458 | |
Houkime | 9137536294 | |
Houkime | 5467a62906 | |
Houkime | 9a28c0ebcb | |
Houkime | 7ad5f91be1 | |
Houkime | ae708e446b | |
Houkime | 1c28984475 | |
Inex Code | 2df930b9ba | |
Inex Code | 2c21bd2a14 | |
Inex Code | 21c5f6814c | |
Houkime | 559de63221 | |
Houkime | 5ff89c21d5 | |
Inex Code | ba9270755a | |
Houkime | 0e13e61b73 | |
Houkime | 1fb5e3af97 | |
Houkime | 2dd9da9a96 | |
Inex Code | a7d0f6226f | |
Houkime | e8f1f39b18 | |
Houkime | f804c88fa6 | |
Houkime | 6004977845 | |
Houkime | 3551813b34 | |
Houkime | ce55416b26 | |
Houkime | 16a96fe0fa | |
Houkime | f2161f0532 | |
Houkime | cb2273323f | |
Houkime | 6369042420 | |
Houkime | 3edb38262f | |
Houkime | 3684345c2d | |
Houkime | 6b0c55a786 | |
Houkime | dbac010303 | |
Houkime | c09f2f393b | |
Houkime | ce9b24b579 | |
Houkime | 4b1594ca22 | |
Houkime | c94b4d07bf | |
Inex Code | 4225772573 | |
Houkime | 2040272879 | |
Inex Code | f3dd18a830 | |
Inex Code | 0d622d431f | |
Inex Code | f27a3df807 | |
Inex Code | 1e840f8cff | |
Inex Code | b78ee5fcca | |
Houkime | 53dfb38284 | |
Houkime | ecf72948b1 | |
Houkime | f829a34dc7 | |
Houkime | 9f096ed2c0 | |
Houkime | cd32aa83b7 | |
Houkime | a56461fb96 | |
Houkime | b346a283a4 | |
Houkime | 806fb3c84b | |
Houkime | 1fd5db9ff3 | |
Houkime | 5d95c1b44e | |
Houkime | 1c96743c5d | |
Houkime | 38de01da8b | |
Houkime | 8475ae3375 | |
Houkime | a48856c9ad | |
Houkime | a8f72201a7 | |
Houkime | cf2dc6795a | |
Houkime | a486825a4f | |
Houkime | eac561c57c | |
Houkime | 53638b7e06 | |
Houkime | de1cbcb1ca | |
Houkime | cfda6b0810 | |
Houkime | 09c79b3477 | |
Inex Code | 93b98cd4fd | |
Inex Code | 421c92d12e | |
Inex Code | c603394449 | |
Houkime | f77556b60e | |
Houkime | b04dfc6c4e | |
Houkime | 42a5b6f70a | |
Inex Code | 32a242b560 | |
Inex Code | a4b0e6f208 | |
Houkime | ad130e392c | |
Houkime | 780c12df6c | |
Houkime | 6da0791b47 | |
Houkime | 792dcd459d | |
Houkime | 5100f1a497 | |
Houkime | 44e45a5124 | |
Houkime | 0b8f77e6f7 | |
Houkime | e3545d4541 | |
Houkime | 550f7fa620 | |
Houkime | cc073155db | |
Houkime | 891993e4cd | |
Houkime | 7e022e0cfe | |
Houkime | 44ddd27e84 | |
Houkime | 761b6be4e5 | |
Houkime | a76b4ac134 | |
Houkime | ac9fbbff3e | |
Houkime | bdae6cfb75 | |
Houkime | e7683352cd | |
Houkime | d0b27da641 | |
Houkime | d10bf99927 | |
Houkime | c5c41b3ced | |
Houkime | c8512eacdc | |
Houkime | d38b8180cb | |
Houkime | 1faaed992e | |
Houkime | 135fb0c42d | |
Houkime | ca036b294a | |
Houkime | afdbf01cfc | |
Houkime | ecf44e5169 | |
Houkime | ebff2b308a | |
Houkime | 2a87eb80f9 | |
Houkime | f116ce1bdb | |
Houkime | 05f2cc3f14 | |
Houkime | f622d617cf | |
Houkime | 312fceeb9c | |
Houkime | ac6d25c4c1 | |
Houkime | 026d72b551 | |
Houkime | 029cb47db6 | |
Houkime | b32ca3b11a | |
Houkime | fa86c45bd0 | |
Houkime | 4572c00640 | |
Houkime | d3f9ce7bf5 | |
Houkime | ebeb76149b | |
Houkime | 592eb1a1f8 | |
Houkime | f09d21a031 | |
Houkime | 7a5af6af99 | |
Houkime | aca05f26ea | |
Houkime | 92be699031 | |
Houkime | 71b987da57 | |
Houkime | 9f2dbaa98d | |
Houkime | 6057e350ef | |
Houkime | df5b318fff | |
Houkime | f0d6ac624d | |
Houkime | ae7f53d1ec | |
Houkime | 34854b5118 | |
Houkime | f5de4974e7 | |
Houkime | 208e256c0f | |
Houkime | 44041662c2 | |
Houkime | 3b8168c25d | |
Houkime | c2cd972805 | |
Houkime | 0a9848be47 | |
Houkime | ac04425221 | |
Houkime | 1019031b5b | |
Houkime | 95b88ea2e4 | |
Houkime | 498208f083 | |
Houkime | 840572f82c | |
Houkime | f3bfa2293c | |
Houkime | b21d63be63 | |
Houkime | 3aefbaaf0b | |
Houkime | f0aabec947 | |
Houkime | d1e1039519 | |
Houkime | 507cdb3bbd | |
Houkime | 6132f1bb4c | |
Houkime | 1940b29161 | |
Houkime | 5e9c651c65 | |
Houkime | b305c19559 | |
Houkime | ef57e25a26 | |
Houkime | f9eaaab929 | |
Houkime | 2c510ae884 | |
Houkime | ed0861aacc | |
Houkime | 054b07baa3 | |
Houkime | 343fda0630 | |
Houkime | 0a4338596b | |
Houkime | 79b9bb352a | |
Houkime | 951bb8d5ec | |
Houkime | d354f4ac0b | |
Houkime | 43b6ebd04d | |
Houkime | d57dc3f7d2 | |
Houkime | 35a4fec9d4 | |
Houkime | a134009165 | |
Houkime | d972fdc3cc | |
Houkime | 6f8f5cbb9e | |
Houkime | 02deae217d | |
Houkime | 48dc63a590 | |
Houkime | 873bc8282e | |
Houkime | c928263fce | |
Houkime | 0847e16089 | |
Houkime | 60dcde458c | |
Houkime | 1d403b0e94 | |
Houkime | c8a8d45110 | |
Houkime | ff6bc2a142 | |
Houkime | e56907f2cd | |
Houkime | a0a32a7f37 | |
Houkime | 228eab44bb | |
Houkime | 348ece8b9c | |
Houkime | a280e5c999 | |
Houkime | add4e21f39 | |
Houkime | b27f19b201 | |
Houkime | 5efb351159 | |
Houkime | 529608d52e | |
Houkime | 29c4b74a86 | |
Houkime | 3f30469532 | |
Houkime | a405eddbcf | |
Houkime | 5371c7feef | |
Houkime | e156e9cd58 | |
Houkime | 83b24f5fcd | |
Houkime | 4ca2e62b5c | |
Houkime | a42294b706 | |
Houkime | a0a0e1fb3b | |
Houkime | 95e2032c63 | |
Houkime | 178c456593 | |
Houkime | ff72d4124e | |
Houkime | 54103973bc | |
Houkime | a9cd8dda37 | |
Houkime | 86c99c0be8 | |
Houkime | 3f2c1e0593 | |
Houkime | fc7483a6f2 | |
Houkime | 37c18ead99 | |
Houkime | e5a965ea29 | |
Houkime | 45ab9423b9 | |
Houkime | 9097ba02d7 | |
Houkime | 7d76b74dbc | |
Houkime | 1e5fb67374 | |
Houkime | a3d58be0d5 | |
Houkime | a1071fd2c9 | |
Houkime | 7b7f782185 | |
Houkime | f65c0522b0 | |
Houkime | 6bf5ee4b64 | |
Houkime | 8eab26d552 | |
Houkime | 70cf0306a9 | |
Houkime | 9f19c677d0 | |
Houkime | 926d0c27c5 | |
Houkime | 4aa87edf47 | |
Houkime | 92cfd00f93 | |
Houkime | e43478d437 | |
Houkime | 7af7600599 | |
Houkime | 326e3d3b0c | |
Houkime | de8ef744eb | |
Houkime | 713296c520 | |
Houkime | e7a6700522 | |
Houkime | 169e9ad57d | |
Houkime | 6523105d89 | |
Houkime | 1cefaefa3b | |
Houkime | 2743441e1e | |
Houkime | 9075afd38a | |
Houkime | af5edb695f | |
Houkime | c74b3df32c | |
Houkime | 53bb5cc4e2 | |
Houkime | 03313b739a | |
Houkime | 3dc6fb91f2 | |
Houkime | b6eb27dc5e | |
Houkime | c5088e0e2c | |
Houkime | 953860a02c | |
Houkime | 6a00d3cff9 | |
Houkime | 08cc7740b3 | |
Houkime | 9793201ca1 | |
Inex Code | 64e7afe53e | |
Inex Code | 15ce344bc8 | |
Inex Code | b480c84041 | |
Houkime | cd2e9d3ba3 | |
Houkime | 3ee90617ba | |
Inex Code | 11184a55e8 | |
Houkime | e4865aa094 | |
Houkime | 35258bad38 | |
Houkime | 59fef1d016 | |
Inex Code | d9b26e12e2 | |
Houkime | ae16a527a2 | |
Houkime | 7147f97077 | |
Houkime | bae81b2f69 | |
Houkime | a76834d1ba | |
Houkime | f8029d133a | |
Houkime | 4ad4c3cc67 | |
Houkime | b9be0be6a2 | |
Houkime | 2f2c4f14af | |
Houkime | e6efd1b42d | |
Houkime | 1593474dc1 | |
Houkime | 71eeed926d | |
Houkime | 81d1762518 | |
Houkime | e159d2f1a7 | |
Houkime | 8604caa331 | |
Houkime | 273a1935a8 | |
Houkime | 2f71469f39 | |
Houkime | 6ca68fae17 | |
Inex Code | 1664f857ea | |
Houkime | b86d0cd850 | |
Inex Code | 1f558d6cf9 | |
Inex Code | 0f1d8e22f2 | |
Inex Code | e7e0fdc4a1 | |
Inex Code | c9cfb7d7bc | |
Houkime | 5be3c83952 | |
Inex Code | c6919293b6 | |
Houkime | e3a87f1d98 | |
Houkime | 25f3115c05 | |
Houkime | b5e2499a30 | |
Houkime | 612a46ee42 | |
Houkime | 4f2f24daac | |
Houkime | 3449837de9 | |
Houkime | f02e27cf06 | |
Houkime | 79c3b9598c | |
Houkime | 47aee3c1f1 | |
Houkime | 8e1e37c766 | |
Houkime | 33c60f971d | |
Inex Code | b3a37e8b1f | |
Houkime | b3724e240e | |
Houkime | 9e0b0d8caa | |
Houkime | cbedd196e4 | |
Houkime | 4862cdc72f | |
Houkime | 17c7dffb07 | |
Houkime | 39cd4b714b | |
Houkime | 92322d8fad | |
Houkime | 21791f20a2 | |
Houkime | aa9d082adc | |
Houkime | f1654c699c | |
Houkime | 928f026e7c | |
Inex Code | c77191864e | |
Houkime | 3344ab7c5d | |
Houkime | 5fbfaa73ea | |
Houkime | d0a17d7b7a | |
Houkime | da19cc8c0e | |
Houkime | 817f414dd9 | |
Houkime | baf72b730b | |
Houkime | 51018dd6c2 | |
Houkime | 158c1f13a6 | |
Houkime | 9cc6e304c0 | |
Houkime | e5756a0dd1 | |
Houkime | 72fdd412d9 | |
Houkime | 0b28fa2637 | |
Houkime | 2d6406c8c1 | |
Houkime | 18f5ff815c | |
Houkime | ce4fbdae0a | |
Houkime | de27032191 | |
Houkime | 137ae58b42 | |
Houkime | 92b2a67479 | |
Houkime | 6cb9cc6d03 | |
Houkime | 851d90b30c | |
Houkime | 503c9c99ef | |
Houkime | d09cd1bbe1 | |
Houkime | 1305144112 | |
Houkime | f5faf84a2b | |
Houkime | 0aaa90f54a | |
Houkime | 592d62f53f | |
Houkime | 367ba51c9d | |
Houkime | e739921835 | |
Houkime | 102d6b1c5c | |
Houkime | 6eb5800e4e | |
Houkime | 469f9d292d | |
Houkime | ba5f91b000 | |
Houkime | 4676e364a6 | |
Houkime | 5a1b48fa3d | |
Houkime | 7f5236701e | |
Houkime | d8c78cc14c | |
Houkime | d26d115172 | |
Houkime | 2f707cc0cc | |
Houkime | 00ba76c074 | |
Houkime | 824b018487 | |
Houkime | 8f645113e2 | |
Houkime | f45567b87b | |
Houkime | e55a55ef6f | |
Houkime | 02bfffa75f | |
Houkime | 42fa5fe524 | |
Houkime | 3aa3d197e2 | |
Houkime | e0bd6efcb2 | |
Houkime | 203940096c | |
Houkime | ac4d4e0127 | |
Houkime | 548f47963a | |
Houkime | 0239f3174e | |
Houkime | 74777c4343 | |
Houkime | 0bf18603d4 | |
Houkime | 458c4fd28a | |
Houkime | bfcec3d51d | |
Houkime | 179078aed2 | |
Houkime | 1d6275b75b | |
Houkime | 07fe2f8a55 | |
Houkime | 270e569af2 | |
Houkime | 7e0e6015cf | |
Houkime | f542c1e6c7 | |
Houkime | e125f3a4b1 | |
Houkime | 889c7eee6a | |
Houkime | 8065921862 | |
Houkime | 3e7ea01a42 | |
Houkime | 8554879dc2 | |
Houkime | c47977c100 | |
Houkime | d9bde6930b | |
Houkime | 865e304f42 | |
Inex Code | d356fad534 | |
Inex Code | f4df1f6a62 | |
Inex Code | 0ef6569d97 | |
Inex Code | f950dd1e93 | |
Inex Code | d02302c7b2 | |
Houkime | 549b149aaf | |
Houkime | 981445d594 | |
Houkime | 7f984b678f | |
Houkime | 98de85e569 | |
Houkime | 4b8abb6a66 | |
Houkime | 0c95c5913b | |
Houkime | ff58bdccbb | |
Houkime | d6609b29e8 | |
Houkime | 2168037a10 | |
Houkime | 9d5335f62c | |
Houkime | 9a94f7624e | |
Houkime | 50952d688a | |
Houkime | f7c0821675 | |
Houkime | 4b07d4de41 | |
Houkime | 603ed2ddf9 | |
Houkime | 9c4d2a0ba5 | |
Houkime | c3696d3a4b | |
Houkime | 345e2c10c4 | |
Houkime | 97e4c529f6 | |
Houkime | 0dc6f74754 | |
Houkime | 03d4632465 | |
Houkime | 9fab596f91 | |
Houkime | b1d0a80963 | |
Houkime | 54a8e0b2b0 | |
Houkime | 60806cd536 | |
Houkime | ddd2176a5d | |
Houkime | deb857bca9 | |
Houkime | a6b3a5e590 | |
Houkime | e25aa2cb33 | |
Houkime | 7ddfad10d4 | |
Houkime | b2e231ebae | |
Houkime | 0bf18dcdc7 | |
Houkime | 396b42b69c | |
Houkime | d340b0ca67 | |
Houkime | ad6cc5e1bc | |
Houkime | c0dfbb3ca2 | |
Houkime | 3eb8382d9b | |
Houkime | cb1dd1011e | |
Houkime | 108fca0eb3 | |
Houkime | d1425561d9 | |
Houkime | 72f4fc8ae7 | |
Houkime | fa360655fe | |
Houkime | 65acd3173a | |
Houkime | dde86725b9 | |
Houkime | ba72fadb8d | |
Houkime | 043675ce14 | |
Houkime | 238a656cd9 | |
Houkime | ad66513f27 | |
Houkime | c34eb6d447 | |
Houkime | 0b7d2d0bf4 | |
Houkime | a664ab3dd4 | |
Houkime | 3605a71c1d | |
Houkime | 617f7f1628 | |
Houkime | 4475bcea45 | |
Houkime | 042a2e4cf2 | |
Houkime | 312328af95 | |
Houkime | c040f0825c | |
Houkime | f8edcac33f | |
Houkime | 789fd71e2f | |
Houkime | a34b14449b | |
Houkime | 8a4f256c12 | |
Houkime | 0207b5a473 | |
Houkime | 1c0e3f0f92 | |
Houkime | 378f0ebd73 | |
Houkime | 48359ffd20 | |
Houkime | a68e94fad3 | |
Houkime | 30ac990985 | |
Houkime | fa26379a68 | |
Houkime | 08739f7ca8 | |
Houkime | 919ba1ad03 | |
Houkime | 1b1052d205 | |
Houkime | 0ae4192081 | |
Houkime | 4018dca184 | |
Houkime | ec85f060f8 | |
Houkime | 283c8d09cc | |
Houkime | f25e57c51a | |
Houkime | 5ad9f50b94 | |
Houkime | a2ff74244e | |
Houkime | 7699ba0d9b | |
Houkime | 146b0ca02f | |
Houkime | d4cad61d56 | |
Houkime | 3c42d8c413 | |
Houkime | e043720289 | |
Houkime | 417533fc04 | |
Houkime | 45f33e2d31 | |
Houkime | 18d5cd2b83 | |
Houkime | ae065867b3 | |
Houkime | 2eb64db199 | |
Houkime | 772b499b46 | |
Houkime | 144e4e5e91 | |
Houkime | a2dd47130b | |
Houkime | 4b2cecac8f | |
Houkime | 6524c98131 | |
Houkime | a0afe63b3d | |
Houkime | ea6a047478 | |
Houkime | 4e329299e0 | |
Houkime | 23ee958bfb | |
Houkime | d62d7534d7 | |
Houkime | dc3032eb03 | |
Houkime | 98e1c9ebaa | |
Houkime | c32353fe9b | |
Houkime | 8a1b3be9fa | |
Houkime | b3633d1a96 | |
Houkime | 01797cb922 | |
Houkime | 06deb83b33 | |
Houkime | e091fbd4a2 | |
Houkime | e944f4a267 | |
Houkime | f1987edd99 | |
Houkime | c493a49daa | |
Houkime | 865c8f5280 | |
Houkime | bd744ba211 | |
Houkime | 86e2b90bef | |
Houkime | c9fd656181 | |
Houkime | a043392b7b | |
Houkime | e447419f96 | |
Houkime | 3afe196901 | |
Houkime | 1568ee68a4 | |
Houkime | 3970524bce | |
Houkime | bf263f68b9 | |
Houkime | 7e243146db | |
Houkime | d5881cc523 | |
Houkime | 17df21964a | |
Houkime | 6fa72dbac1 | |
Houkime | 474f04f923 | |
Houkime | 90c2524800 | |
Houkime | e54224f15d | |
Houkime | a66a3f35fc | |
Houkime | f5066082c8 | |
Houkime | 934afeb531 | |
Houkime | af6a49b0a3 | |
Houkime | 24d170b3cb | |
Houkime | 23e3d553e6 | |
Houkime | a98314bb60 | |
Houkime | 6d73405118 | |
Houkime | 740b072b07 | |
Houkime | cf3cbd179f | |
Houkime | 4871958534 | |
Houkime | 060117a355 | |
Houkime | 80f2c99ba4 | |
Inex Code | 7c2e1dcf25 | |
Inex Code | 670911a92a | |
Inex Code | 13a2d8faca | |
Inex Code | 6845085a83 | |
Houkime | 4c6518a513 | |
Inex Code | 5ada12d2f3 | |
Inex Code | 6aadbabbc4 | |
Inex Code | 8ce13f0ad2 | |
Inex Code | 392ed79e92 | |
Inex Code | 1035f9215d | |
Inex Code | 4e7261c9c4 | |
Inex Code | e7a49e170d | |
Inex Code | b5ee542693 | |
Inex Code | 2db40650ad | |
def | 2fc635da71 | |
def | bcfb8e62e9 | |
Inex Code | 24353ca56a | |
Inex Code | 45c6133881 | |
Inex Code | cb403a94bd | |
Inex Code | 7b526b7bd9 | |
Inex Code | 34b1a47d04 | |
def | 5c30f80479 | |
Inex Code | 999dd95cab | |
Houkime | 67872d7c55 | |
Houkime | 3ecfb2eacb | |
Houkime | 450ff41ebd | |
Houkime | 8235c3595c | |
Houkime | 7d9bccf4ec | |
Houkime | 87ea88c50a | |
Houkime | 25326b75ca | |
Houkime | b3d6251d11 | |
Houkime | b11e5a5f77 | |
Houkime | 69577c2854 | |
Houkime | 7cf295450b | |
Houkime | f33d5155b0 | |
Houkime | 5d4ed73435 | |
Houkime | c037a12f4d | |
Houkime | 29723b9f3a | |
Houkime | cb1906144c | |
Houkime | 3f6aa9bd06 | |
Houkime | 5dedbda41f | |
Houkime | e817de6228 | |
Houkime | f928ca160a | |
Houkime | a2ac47b0f5 | |
houkime | 66480c9904 | |
Houkime | 5a1f64b1e7 | |
Houkime | 22a309466e | |
Houkime | 2a239e35ad | |
Houkime | 20410ec790 | |
Houkime | 3021584adc | |
Houkime | 16f71b0b09 | |
Houkime | 39277419ac | |
Houkime | d3bf867bb5 | |
Houkime | 009a89fa02 | |
Houkime | a97705ef25 | |
Houkime | ab70687c61 | |
Houkime | 5a25e2a270 | |
Houkime | 0ae7c43ebf | |
Houkime | 6f6a9f5ef0 | |
Houkime | fda5d315a9 | |
Houkime | 13e84e2697 | |
Houkime | eba1d01b3d | |
Houkime | 8dfb3eb936 | |
Houkime | 4579fec569 | |
Houkime | 257096084f | |
Houkime | bf6c230ae0 | |
Houkime | 95e200bfc5 | |
Houkime | 9ffd67fa19 | |
Houkime | b98ccb88d1 | |
Houkime | 3cb7f29593 | |
Houkime | e504585437 | |
Houkime | 647e02f25b | |
Houkime | ba6a5261fa | |
Houkime | d8e3cd67e0 | |
Houkime | 256c16fa9f | |
Houkime | f2fa47466b | |
Houkime | ca822cdf6f | |
Houkime | 2797c6f88f | |
Houkime | 4498003aca | |
Houkime | 772c0dfc64 | |
Houkime | 671203e990 | |
Houkime | 9a49067e53 | |
Houkime | 682cd4ae87 | |
Houkime | 572ec75c39 | |
Houkime | 27a7c24bc3 | |
Houkime | 4e60d1d37a | |
Houkime | ff264ec808 | |
Houkime | b856a2aad3 | |
Houkime | 0d748d7ab1 | |
Houkime | c12dca9d9b | |
Houkime | 4492bbe995 | |
Houkime | 84bfa333fa | |
Houkime | be13d6163e | |
Houkime | ce411e9291 | |
Houkime | cf7b7eb8a7 | |
Houkime | 3feebd5290 | |
Houkime | 73584872f0 | |
Houkime | dc778b545e | |
Houkime | f96d8b7d7c | |
Houkime | dd525fe723 | |
Houkime | b9c570720b | |
Houkime | 732e72d414 | |
Houkime | 6f400911fc | |
Houkime | c86eb8b786 | |
Houkime | fa54220327 | |
Houkime | b43c4014e2 | |
Houkime | db55685488 | |
Houkime | 3921d9fe4c | |
Houkime | 2e2d344f43 | |
Houkime | 55ad2484b8 | |
Houkime | 8a05a55b80 | |
Houkime | 4cfe0515ea | |
def | 16e0bd56ce | |
Inex Code | 9cf4d46c50 | |
Houkime | d47368cbe9 | |
Houkime | 063dfafc19 | |
Houkime | 5c86706f4b | |
Houkime | 14c4ae26ab | |
Inex Code | f0132266e9 | |
Houkime | 870d2c408d | |
Houkime | f51e378ff0 | |
Houkime | 7acbba9960 | |
Houkime | 106a083ca2 | |
Houkime | b6eeec23cc | |
Houkime | c21b6cb071 | |
Houkime | 144f95fb34 | |
Inex Code | 60919b88b9 | |
Inex Code | 57794c9535 | |
Houkime | 2e9cdf15ab | |
Inex Code | 3ce71b0993 | |
Houkime | d6ef01c0c7 | |
Houkime | 244851c7cc | |
Houkime | 5afa2338ca | |
def | e130d37033 | |
Houkime | f7b7e5a0be | |
Inex Code | 9ee0240bbd | |
Houkime | 41e5f89b7b | |
Houkime | 07af2e59be | |
Houkime | a723311b36 | |
Houkime | 589093b853 | |
Houkime | 6c6f45781c | |
Houkime | 2d7bc0f154 | |
Houkime | 5efa85f877 | |
Houkime | f4a7986cfa | |
Houkime | 2ec0548c09 | |
Houkime | dd15e0ab65 | |
Houkime | 9540e26ce1 | |
Houkime | 7d58eb3d92 | |
Houkime | decb98afe2 | |
Houkime | b0c26b876a | |
Inex Code | 27255cb533 | |
Inex Code | 83736e1e8f | |
Inex Code | 19a4ec5377 | |
Inex Code | 8cdacb73dd | |
Inex Code | 0a09a338b8 | |
Inex Code | 7a1e8af8fe | |
Inex Code | e387e30983 | |
Inex Code | 582e38452d | |
Inex Code | 6bbceca917 | |
Inex Code | 9a339729b7 | |
Inex Code | a7208c1a91 | |
Inex Code | 49571b6ef2 | |
Alya Sirko | a3260aadc3 | |
Inex Code | 9489180363 | |
Inex Code | 97acae189f | |
Inex Code | d7cba49c4a | |
Inex Code | 32278e9063 | |
Inex Code | 4f2332f8a0 | |
Inex Code | 0e68ef1386 | |
Inex Code | 7935de0fe1 | |
def | 206589d5ad | |
def | 337cf29884 | |
Inex Code | 5be240d357 | |
Inex Code | bec99f29ec | |
Inex Code | 8ea6548710 | |
Inex Code | 67c8486c9b | |
Inex Code | 1f64a76723 | |
Inex Code | e3245cd26a | |
Inex Code | a6fe72608f | |
Inex Code | 5532114668 | |
Inex Code | 26f9393d95 | |
Inex Code | eb21b65bbc | |
Inex Code | e3354c73ef | |
def | 9bd2896db8 | |
Inex Code | 63f3b2f4d1 | |
Inex Code | e5405dfc6b | |
Inex Code | 5711cf66b0 | |
Inex Code | 376bf1ef77 | |
Inex Code | 503a39f390 | |
Inex Code | 45c3e3003d | |
Inex Code | 80e5550f7d | |
Inex Code | c6a3588e33 | |
Inex Code | 07e723dec8 | |
Inex Code | 517a769e5b | |
Inex Code | e2ac429975 | |
Inex Code | 71c70592b2 | |
Inex Code | 6ca723867e | |
Inex Code | 766edc657a | |
Inex Code | 9b25bc0d53 | |
Inex Code | 17b8334c6e | |
Inex Code | 01dea50c1f | |
Inex Code | 28db251f1f | |
Inex Code | a6ad9aaf90 | |
Inex Code | fc971292c2 | |
Inex Code | c20b0c94f4 | |
Inex Code | 992a7837d4 | |
Inex Code | 99beee40d6 | |
Inex Code | 75e3143c82 | |
Inex Code | c30e062210 | |
Inex Code | 401dff23fb | |
Inex Code | 1ac5f72433 | |
Inex Code | 36bf1a80bf | |
Inex Code | c0c9c1e89e | |
Inex Code | 3044557963 | |
Inex Code | f31b1173a2 | |
Inex Code | ac220c6968 | |
Inex Code | f2c73853bc | |
Inex Code | 874acb1343 | |
NaiJi ✨ | 035796ecc7 | |
Inex Code | 6cd896f977 | |
Inex Code | 4cbb08cb6e | |
Inex Code | 72a9b11541 | |
Inex Code | 2ba3777713 | |
Inex Code | 250677f97d | |
Inex Code | 50a82a065e | |
Inex Code | c22fe9e8bd | |
Inex Code | f228db5b29 | |
Inex Code | 2235358827 | |
Inex Code | 98e60abe74 | |
Inex Code | 2ec9c8a441 | |
Inex Code | 6fbfee5b1b | |
Inex Code | fbb82c87e8 | |
Inex Code | 4f30017132 | |
Inex Code | 40501401b4 | |
Inex Code | 08c7f62e93 | |
Inex Code | 5140081cdb | |
Inex Code | 759e90f734 | |
Inex Code | ade7c77754 | |
Inex Code | fe86382819 | |
Inex Code | d7fe7097e6 | |
Inex Code | ea696d0f0e | |
Inex Code | aa76f87828 | |
Inex Code | 6c0af38e27 | |
Inex Code | 355fc68232 | |
Inex Code | 07cf926e79 | |
Inex Code | 650032bdab | |
Inex Code | 0c61c1abb5 | |
Inex Code | 13cef67204 | |
Illia Chub | 030a655c39 | |
Inex Code | 0d92be8fb7 | |
Inex Code | f235819a1c | |
Inex Code | f24323606f | |
Illia Chub | 0e8e78de08 | |
Illia Chub | 3cb6b769df | |
Inex Code | 710925f3ea | |
Inex Code | f4288dacd6 | |
Inex Code | 340b50bb0d | |
Inex Code | f68bd88a31 | |
Inex Code | dc4c9a89e1 | |
Inex Code | 71fc93914a | |
Inex Code | c53cab7b88 | |
Inex Code | 077c886f40 | |
Inex Code | 69eb91a1ea | |
Inex Code | 8e0a5f5ec0 | |
Illia Chub | 1385835a7f | |
Illia Chub | d1fdaf186d | |
Illia Chub | fb98fd1e60 | |
Inex Code | 245964c998 | |
Illia Chub | 205908b46c | |
Illia Chub | 9a87fa43eb | |
Illia Chub | b201cd6ca2 | |
Inex Code | b185724000 | |
Inex Code | ec7ff62d59 | |
Inex Code | eb4f25285d | |
Inex Code | e00aaf7118 | |
Inex Code | fb8066a2f5 | |
Inex Code | 1432671cbe | |
Inex Code | ec76484857 | |
Inex Code | dc56b6f4ad | |
Inex Code | c910b761d1 | |
Inex Code | 8700642260 | |
Inex Code | 82b7f97dce | |
Inex Code | 6df4204ca1 | |
Inex Code | 447cc5ff55 | |
Illia Chub | 48da8c2228 | |
Illia Chub | af4907dda5 | |
Inex Code | 6c3609f590 | |
Illia Chub | 59d251bb77 | |
Illia Chub | 5c4d871bcd | |
Illia Chub | a1e6c77cc1 | |
Illia Chub | a86f2fe2bb | |
Inex Code | 767c504a1d | |
Illia Chub | dbb4c10956 | |
Inex Code | af059c2efd | |
Inex Code | 26b054760c | |
Inex Code | 2f03cb0756 | |
Inex Code | 09f319d683 | |
Illia Chub | 5612ff5373 | |
Illia Chub | ba2d785ada | |
Illia Chub | 1bf4a779af | |
Illia Chub | f46e1ba134 | |
Illia Chub | 37bb014994 | |
Illia Chub | c1c9f18521 | |
Illia Chub | 956336da4b | |
Illia Chub | 376e38942b | |
Illia Chub | 82821603f1 | |
Illia Chub | 4b39d6e4f9 | |
Illia Chub | b7c8bade4c | |
Illia Chub | e6ef9be267 | |
Illia Chub | 568add06c6 | |
Illia Chub | 2f526c7bcd | |
Illia Chub | 7c144332cf | |
Illia Chub | 16b6a15d0c | |
Illia Chub | 8580c047d3 | |
Illia Chub | b63a4c3b39 | |
Illia Chub | 2ae6925d61 | |
Illia Chub | 4c63004954 | |
Illia Chub | ef79004f19 | |
Illia Chub | a1292232a7 | |
Illia Chub | 1bfa887e6b | |
Illia Chub | 1330e1c202 | |
Illia Chub | ffe89b6983 | |
Illia Chub | 413305c290 | |
Illia Chub | ea16f656ee | |
Illia Chub | d06e8dc662 | |
Illia Chub | a8ccab1901 | |
Illia Chub | f8a3c94fdd | |
Illia Chub | 5e8736fa5a | |
Illia Chub | e4267aba18 | |
Illia Chub | ebd60b9787 | |
Illia Chub | 1aa0a46267 | |
Illia Chub | 1386c24692 | |
Illia Chub | 4604bcb666 | |
Illia Chub | 8d05640a09 | |
Illia Chub | b8a2f20840 | |
Illia Chub | 9ec897f519 | |
Illia Chub | e7edbe2669 | |
Illia Chub | 18d8d78f0d | |
Illia Chub | 685baf4e04 | |
Illia Chub | 678488866c | |
Illia Chub | b9093f041b | |
Illia Chub | 0980039a67 | |
Illia Chub | 2795aa5fd3 | |
Illia Chub | e62fc24644 | |
Illia Chub | b96bfd755d | |
Illia Chub | 17c3e6f9e3 | |
Illia Chub | 1071bc459b | |
Illia Chub | a4987210ac | |
Illia Chub | ff13321154 | |
Illia Chub | fe6e3c6034 | |
Illia Chub | db726b82e2 | |
Illia Chub | 2885fe4356 | |
Illia Chub | a8a2fe06fb | |
Illia Chub | fe1773b7f5 | |
Illia Chub | d2716f5816 | |
Illia Chub | 1ceda086f5 | |
Illia Chub | 858b8e4698 |
@@ -0,0 +1,2 @@
```ini
[run]
source = selfprivacy_api
```
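This `[run]` section limits coverage measurement to the `selfprivacy_api` package. As a minimal sketch (not part of the diff, assuming coverage.py is installed), the same configuration drives coverage.py's Python API like this:

```python
# Sketch of what the .coveragerc above configures; the package name
# comes from the diff, everything else is coverage.py's documented API.
import coverage

cov = coverage.Coverage(config_file=".coveragerc")  # reads [run] source = ...
cov.start()
import selfprivacy_api  # only code under selfprivacy_api/ is measured
cov.stop()
cov.report()
```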
@@ -0,0 +1,28 @@
```yaml
kind: pipeline
type: exec
name: default

steps:
  - name: Run Tests and Generate Coverage Report
    commands:
      - nix flake check -L
      - sonar-scanner -Dsonar.projectKey=SelfPrivacy-REST-API -Dsonar.sources=. -Dsonar.host.url=http://analyzer.lan:9000 -Dsonar.login="$SONARQUBE_TOKEN"
    environment:
      SONARQUBE_TOKEN:
        from_secret: SONARQUBE_TOKEN

  - name: Run Bandit Checks
    commands:
      - bandit -ll -r selfprivacy_api

  - name: Run Code Formatting Checks
    commands:
      - black --check .

node:
  server: builder

trigger:
  event:
    - push
```
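The pipeline above runs four checks on every push: the Nix flake check (tests plus coverage), a SonarQube scan, Bandit security linting, and a black formatting check. A hedged sketch of reproducing the non-SonarQube steps locally (assuming the same tools are on PATH, e.g. inside the project's nix-shell; sonar-scanner is skipped because it needs the CI-only `SONARQUBE_TOKEN` secret and the analyzer host):

```python
# Sketch: run the same checks the CI pipeline runs, minus sonar-scanner.
import subprocess

CHECKS = [
    ["nix", "flake", "check", "-L"],             # tests + coverage, as in CI
    ["bandit", "-ll", "-r", "selfprivacy_api"],  # medium+ severity security issues
    ["black", "--check", "."],                   # formatting check, no rewrites
]

for cmd in CHECKS:
    subprocess.run(cmd, check=True)  # raises CalledProcessError on failure
```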
@@ -0,0 +1,4 @@
```ini
[flake8]
max-line-length = 80
select = C,E,F,W,B,B950
extend-ignore = E203, E501
```
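For context: `select = C,E,F,W,B,B950` matches the configuration recommended by flake8-bugbear (the `B`/`B950` codes), and `E203`/`E501` are ignored because they conflict with black's formatting, with `B950` enforcing line length instead. A sketch of invoking flake8 with this config through its legacy API (assuming flake8 and flake8-bugbear are installed):

```python
# Sketch: run flake8 programmatically with the repo's [flake8] config.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide()  # picks up the config file above
report = style_guide.check_files(["selfprivacy_api"])
print("violations:", report.total_errors)
```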
@@ -0,0 +1,153 @@
```gitignore
users.nix

### Flask ###
instance/*
!instance/.gitignore
.webassets-cache
.env

### Flask.Python Stack ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# End of https://www.toptal.com/developers/gitignore/api/flask

*.db
*.rdb

/result
/.nixos-test-history
```
@@ -0,0 +1,8 @@
```gitignore
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
```
@@ -0,0 +1,6 @@
```xml
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
```
@@ -0,0 +1,4 @@
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
</project>
```
@@ -0,0 +1,8 @@
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/selfprivacy-rest-api.iml" filepath="$PROJECT_DIR$/.idea/selfprivacy-rest-api.iml" />
    </modules>
  </component>
</project>
```
@@ -0,0 +1,15 @@
```xml
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="Python 3.9" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
  <component name="TestRunnerService">
    <option name="PROJECT_TEST_RUNNER" value="py.test" />
  </component>
</module>
```
@@ -0,0 +1,12 @@
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="CommitMessageInspectionProfile">
    <profile version="1.0">
      <inspection_tool class="CommitFormat" enabled="true" level="WARNING" enabled_by_default="true" />
      <inspection_tool class="CommitNamingConvention" enabled="true" level="WARNING" enabled_by_default="true" />
    </profile>
  </component>
  <component name="VcsDirectoryMappings">
    <mapping directory="" vcs="Git" />
  </component>
</project>
```
@@ -0,0 +1,6 @@
```ini
[MASTER]
init-hook="from pylint.config import find_pylintrc; import os, sys; sys.path.append(os.path.dirname(find_pylintrc()))"
extension-pkg-whitelist=pydantic

[FORMAT]
max-line-length=88
```
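The `init-hook` above is a one-liner of Python that pylint executes at startup; unpacked for readability (same behavior, nothing added beyond the config; note that `find_pylintrc` has been deprecated in newer pylint versions):

```python
# What the init-hook runs, expanded:
import os
import sys

from pylint.config import find_pylintrc  # locates the active .pylintrc

# Append the directory containing .pylintrc (the repo root) to sys.path,
# so pylint can resolve first-party imports such as selfprivacy_api.
sys.path.append(os.path.dirname(find_pylintrc()))
```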
@@ -0,0 +1,19 @@
```jsonc
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python: FastAPI",
            "type": "python",
            "request": "launch",
            "module": "uvicorn",
            "args": [
                "selfprivacy_api.app:app"
            ],
            "jinja": true,
            "justMyCode": false
        }
    ]
}
```
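This launch configuration starts the API by running uvicorn as a module against the app path `selfprivacy_api.app:app`. The same thing outside VScode, as a sketch (uvicorn's defaults for host and port apply):

```python
# Equivalent of the "Python: FastAPI" launch configuration above.
# From a terminal:  python -m uvicorn selfprivacy_api.app:app
# Or programmatically:
import uvicorn

if __name__ == "__main__":
    uvicorn.run("selfprivacy_api.app:app")  # serves on 127.0.0.1:8000 by default
```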
@@ -0,0 +1,12 @@
```json
{
    "python.formatting.provider": "black",
    "python.linting.pylintEnabled": true,
    "python.linting.enabled": true,
    "python.testing.pytestArgs": [
        "tests"
    ],
    "python.testing.unittestEnabled": false,
    "python.testing.pytestEnabled": true,
    "python.languageServer": "Pylance",
    "python.analysis.typeCheckingMode": "basic"
}
```
@@ -0,0 +1,88 @@
# SelfPrivacy API contributors guide

Instructions for [VScode](https://code.visualstudio.com) or [VScodium](https://github.com/VSCodium/vscodium) on a Unix-like platform.

1. **To get started, create an account for yourself on the** [**SelfPrivacy Gitea**](https://git.selfprivacy.org/user/sign_up). Then fork the [repository](https://git.selfprivacy.org/SelfPrivacy/selfprivacy-rest-api) and clone it to your local computer:

   ```git clone https://git.selfprivacy.org/your_user_name/selfprivacy-rest-api```

2. **Install Nix**

   ```sh <(curl -L https://nixos.org/nix/install)```

   For detailed installation information, please review and follow: [link](https://nixos.org/manual/nix/stable/installation/installing-binary.html#installing-a-binary-distribution).

3. **Change directory to the cloned repository and start a nix shell:**

   ```cd selfprivacy-rest-api && nix-shell```

   Nix will install all of the packages necessary for development; all further actions take place inside this nix-shell.

4. **Install these plugins for VScode/VScodium**

   Required: ```ms-python.python```, ```ms-python.vscode-pylance```

   Optional, but highly recommended: ```ms-python.black-formatter```, ```bbenoist.Nix```, ```ryanluker.vscode-coverage-gutters```

5. **Set the path to the Python interpreter from the nix store.** To do this, execute the command:

   ```whereis python```

   Copy the path that starts with ```/nix/store/``` and ends with ```env/bin/python```:

   ```/nix/store/???-python3-3.9.??-env/bin/python```

   Click on the Python version selection in the lower right corner of VScode, and replace the project's interpreter path with the one you copied from the terminal.

6. **Congratulations :) Now you can develop new changes and test the project locally in a Nix environment.**

## What do you need to know before starting development work?

- The REST API is no longer actively developed; the project has moved to [GraphQL](https://graphql.org). However, the REST endpoints still work.

## What to do after making changes to the repository?

**Run unit tests** using ```pytest .```
Make sure that all tests pass and the API works correctly. For convenience, you can use the built-in VScode test interface.

To check the code coverage percentage, execute:

```coverage run -m pytest && coverage xml && coverage report```

Then, using the recommended extension ```ryanluker.vscode-coverage-gutters```, navigate to one of the test files and click the "Watch" button on the bottom panel of VScode.

**Format the code.** We use [black](https://pypi.org/project/black/) formatting; run ```black .``` to format files automatically, or use the recommended extension.

**And please remember, we have adopted a** [**commit naming convention**](https://www.conventionalcommits.org/en/v1.0.0/); follow the link for more information.

Please request a review from at least one of the other maintainers. If you are not sure who to ask, request a review from the SelfPrivacy/Devs team.

## Helpful links!

**SelfPrivacy Contributor chat :3**

- [**Telegram:** @selfprivacy_dev](https://t.me/selfprivacy_dev)
- [**Matrix:** #dev:selfprivacy.org](https://matrix.to/#/#dev:selfprivacy.org)

**Helpful material to review:**

- [GraphQL query language documentation](https://graphql.org/)
- [Strawberry documentation - a Python library for working with GraphQL](https://strawberry.rocks/docs/)
- [Nix documentation](https://nixos.org/guides/ad-hoc-developer-environments.html)

### Track your time

If you are working on a task, please track your time and add it to the commit message. For example:

```
feat: add new feature

- did some work
- did some more work

fixes #4, spent @1h30m
```

[Timewarrior](https://timewarrior.net/) is a good tool for tracking time.
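The guide's test step boils down to running `pytest .` against the FastAPI app. A hedged sketch of what a test in this setup can look like (the file name and endpoint here are illustrative, not taken from the repository; the app path is the one confirmed by the launch configuration above):

```python
# tests/test_smoke.py - illustrative only; real tests live under tests/.
import pytest
from fastapi.testclient import TestClient

from selfprivacy_api.app import app


@pytest.fixture
def client():
    return TestClient(app)


def test_app_serves(client):
    # A hypothetical smoke check: any response proves the app starts.
    response = client.get("/")
    assert response.status_code in (200, 404)
```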
@@ -0,0 +1,661 @@
```text
                    GNU AFFERO GENERAL PUBLIC LICENSE
                       Version 3, 19 November 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

  When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

  A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

  The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

  An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU Affero General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7. This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy. This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged. This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
```
||||||
|
|
||||||
|
a) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by the
|
||||||
|
Corresponding Source fixed on a durable physical medium
|
||||||
|
customarily used for software interchange.
|
||||||
|
|
||||||
|
b) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by a
|
||||||
|
written offer, valid for at least three years and valid for as
|
||||||
|
long as you offer spare parts or customer support for that product
|
||||||
|
model, to give anyone who possesses the object code either (1) a
|
||||||
|
copy of the Corresponding Source for all the software in the
|
||||||
|
product that is covered by this License, on a durable physical
|
||||||
|
medium customarily used for software interchange, for a price no
|
||||||
|
more than your reasonable cost of physically performing this
|
||||||
|
conveying of source, or (2) access to copy the
|
||||||
|
Corresponding Source from a network server at no charge.
|
||||||
|
|
||||||
|
c) Convey individual copies of the object code with a copy of the
|
||||||
|
written offer to provide the Corresponding Source. This
|
||||||
|
alternative is allowed only occasionally and noncommercially, and
|
||||||
|
only if you received the object code with such an offer, in accord
|
||||||
|
with subsection 6b.
|
||||||
|
|
||||||
|
d) Convey the object code by offering access from a designated
|
||||||
|
place (gratis or for a charge), and offer equivalent access to the
|
||||||
|
Corresponding Source in the same way through the same place at no
|
||||||
|
further charge. You need not require recipients to copy the
|
||||||
|
Corresponding Source along with the object code. If the place to
|
||||||
|
copy the object code is a network server, the Corresponding Source
|
||||||
|
may be on a different server (operated by you or a third party)
|
||||||
|
that supports equivalent copying facilities, provided you maintain
|
||||||
|
clear directions next to the object code saying where to find the
|
||||||
|
Corresponding Source. Regardless of what server hosts the
|
||||||
|
Corresponding Source, you remain obligated to ensure that it is
|
||||||
|
available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided
|
||||||
|
you inform other peers where the object code and Corresponding
|
||||||
|
Source of the work are being offered to the general public at no
|
||||||
|
charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded
|
||||||
|
from the Corresponding Source as a System Library, need not be
|
||||||
|
included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any
|
||||||
|
tangible personal property which is normally used for personal, family,
|
||||||
|
or household purposes, or (2) anything designed or sold for incorporation
|
||||||
|
into a dwelling. In determining whether a product is a consumer product,
|
||||||
|
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||||
|
product received by a particular user, "normally used" refers to a
|
||||||
|
typical or common use of that class of product, regardless of the status
|
||||||
|
of the particular user or of the way in which the particular user
|
||||||
|
actually uses, or expects or is expected to use, the product. A product
|
||||||
|
is a consumer product regardless of whether the product has substantial
|
||||||
|
commercial, industrial or non-consumer uses, unless such uses represent
|
||||||
|
the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods,
|
||||||
|
procedures, authorization keys, or other information required to install
|
||||||
|
and execute modified versions of a covered work in that User Product from
|
||||||
|
a modified version of its Corresponding Source. The information must
|
||||||
|
suffice to ensure that the continued functioning of the modified object
|
||||||
|
code is in no case prevented or interfered with solely because
|
||||||
|
modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or
|
||||||
|
specifically for use in, a User Product, and the conveying occurs as
|
||||||
|
part of a transaction in which the right of possession and use of the
|
||||||
|
User Product is transferred to the recipient in perpetuity or for a
|
||||||
|
fixed term (regardless of how the transaction is characterized), the
|
||||||
|
Corresponding Source conveyed under this section must be accompanied
|
||||||
|
by the Installation Information. But this requirement does not apply
|
||||||
|
if neither you nor any third party retains the ability to install
|
||||||
|
modified object code on the User Product (for example, the work has
|
||||||
|
been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a
|
||||||
|
requirement to continue to provide support service, warranty, or updates
|
||||||
|
for a work that has been modified or installed by the recipient, or for
|
||||||
|
the User Product in which it has been modified or installed. Access to a
|
||||||
|
network may be denied when the modification itself materially and
|
||||||
|
adversely affects the operation of the network or violates the rules and
|
||||||
|
protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided,
|
||||||
|
in accord with this section must be in a format that is publicly
|
||||||
|
documented (and with an implementation available to the public in
|
||||||
|
source code form), and must require no special password or key for
|
||||||
|
unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.
|
||||||
|
|
||||||
|
"Additional permissions" are terms that supplement the terms of this
|
||||||
|
License by making exceptions from one or more of its conditions.
|
||||||
|
Additional permissions that are applicable to the entire Program shall
|
||||||
|
be treated as though they were included in this License, to the extent
|
||||||
|
that they are valid under applicable law. If additional permissions
|
||||||
|
apply only to part of the Program, that part may be used separately
|
||||||
|
under those permissions, but the entire Program remains governed by
|
||||||
|
this License without regard to the additional permissions.
|
||||||
|
|
||||||
|
When you convey a copy of a covered work, you may at your option
|
||||||
|
remove any additional permissions from that copy, or from any part of
|
||||||
|
it. (Additional permissions may be written to require their own
|
||||||
|
removal in certain cases when you modify the work.) You may place
|
||||||
|
additional permissions on material, added by you to a covered work,
|
||||||
|
for which you have or can give appropriate copyright permission.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, for material you
|
||||||
|
add to a covered work, you may (if authorized by the copyright holders of
|
||||||
|
that material) supplement the terms of this License with terms:
|
||||||
|
|
||||||
|
a) Disclaiming warranty or limiting liability differently from the
|
||||||
|
terms of sections 15 and 16 of this License; or
|
||||||
|
|
||||||
|
b) Requiring preservation of specified reasonable legal notices or
|
||||||
|
author attributions in that material or in the Appropriate Legal
|
||||||
|
Notices displayed by works containing it; or
|
||||||
|
|
||||||
|
c) Prohibiting misrepresentation of the origin of that material, or
|
||||||
|
requiring that modified versions of such material be marked in
|
||||||
|
reasonable ways as different from the original version; or
|
||||||
|
|
||||||
|
d) Limiting the use for publicity purposes of names of licensors or
|
||||||
|
authors of the material; or
|
||||||
|
|
||||||
|
e) Declining to grant rights under trademark law for use of some
|
||||||
|
trade names, trademarks, or service marks; or
|
||||||
|
|
||||||
|
f) Requiring indemnification of licensors and authors of that
|
||||||
|
material by anyone who conveys the material (or modified versions of
|
||||||
|
it) with contractual assumptions of liability to the recipient, for
|
||||||
|
any liability that these contractual assumptions directly impose on
|
||||||
|
those licensors and authors.
|
||||||
|
|
||||||
|
All other non-permissive additional terms are considered "further
|
||||||
|
restrictions" within the meaning of section 10. If the Program as you
|
||||||
|
received it, or any part of it, contains a notice stating that it is
|
||||||
|
governed by this License along with a term that is a further
|
||||||
|
restriction, you may remove that term. If a license document contains
|
||||||
|
a further restriction but permits relicensing or conveying under this
|
||||||
|
License, you may add to a covered work material governed by the terms
|
||||||
|
of that license document, provided that the further restriction does
|
||||||
|
not survive such relicensing or conveying.
|
||||||
|
|
||||||
|
If you add terms to a covered work in accord with this section, you
|
||||||
|
must place, in the relevant source files, a statement of the
|
||||||
|
additional terms that apply to those files, or a notice indicating
|
||||||
|
where to find the applicable terms.
|
||||||
|
|
||||||
|
Additional terms, permissive or non-permissive, may be stated in the
|
||||||
|
form of a separately written license, or stated as exceptions;
|
||||||
|
the above requirements apply either way.
|
||||||
|
|
||||||
|
8. Termination.
|
||||||
|
|
||||||
|
You may not propagate or modify a covered work except as expressly
|
||||||
|
provided under this License. Any attempt otherwise to propagate or
|
||||||
|
modify it is void, and will automatically terminate your rights under
|
||||||
|
this License (including any patent licenses granted under the third
|
||||||
|
paragraph of section 11).
|
||||||
|
|
||||||
|
However, if you cease all violation of this License, then your
|
||||||
|
license from a particular copyright holder is reinstated (a)
|
||||||
|
provisionally, unless and until the copyright holder explicitly and
|
||||||
|
finally terminates your license, and (b) permanently, if the copyright
|
||||||
|
holder fails to notify you of the violation by some reasonable means
|
||||||
|
prior to 60 days after the cessation.
|
||||||
|
|
||||||
|
Moreover, your license from a particular copyright holder is
|
||||||
|
reinstated permanently if the copyright holder notifies you of the
|
||||||
|
violation by some reasonable means, this is the first time you have
|
||||||
|
received notice of violation of this License (for any work) from that
|
||||||
|
copyright holder, and you cure the violation prior to 30 days after
|
||||||
|
your receipt of the notice.
|
||||||
|
|
||||||
|
Termination of your rights under this section does not terminate the
|
||||||
|
licenses of parties who have received copies or rights from you under
|
||||||
|
this License. If your rights have been terminated and not permanently
|
||||||
|
reinstated, you do not qualify to receive new licenses for the same
|
||||||
|
material under section 10.
|
||||||
|
|
||||||
|
9. Acceptance Not Required for Having Copies.
|
||||||
|
|
||||||
|
You are not required to accept this License in order to receive or
|
||||||
|
run a copy of the Program. Ancillary propagation of a covered work
|
||||||
|
occurring solely as a consequence of using peer-to-peer transmission
|
||||||
|
to receive a copy likewise does not require acceptance. However,
|
||||||
|
nothing other than this License grants you permission to propagate or
|
||||||
|
modify any covered work. These actions infringe copyright if you do
|
||||||
|
not accept this License. Therefore, by modifying or propagating a
|
||||||
|
covered work, you indicate your acceptance of this License to do so.
|
||||||
|
|
||||||
|
10. Automatic Licensing of Downstream Recipients.
|
||||||
|
|
||||||
|
Each time you convey a covered work, the recipient automatically
|
||||||
|
receives a license from the original licensors, to run, modify and
|
||||||
|
propagate that work, subject to this License. You are not responsible
|
||||||
|
for enforcing compliance by third parties with this License.
|
||||||
|
|
||||||
|
An "entity transaction" is a transaction transferring control of an
|
||||||
|
organization, or substantially all assets of one, or subdividing an
|
||||||
|
organization, or merging organizations. If propagation of a covered
|
||||||
|
work results from an entity transaction, each party to that
|
||||||
|
transaction who receives a copy of the work also receives whatever
|
||||||
|
licenses to the work the party's predecessor in interest had or could
|
||||||
|
give under the previous paragraph, plus a right to possession of the
|
||||||
|
Corresponding Source of the work from the predecessor in interest, if
|
||||||
|
the predecessor has it or can get it with reasonable efforts.
|
||||||
|
|
||||||
|
You may not impose any further restrictions on the exercise of the
|
||||||
|
rights granted or affirmed under this License. For example, you may
|
||||||
|
not impose a license fee, royalty, or other charge for exercise of
|
||||||
|
rights granted under this License, and you may not initiate litigation
|
||||||
|
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||||
|
any patent claim is infringed by making, using, selling, offering for
|
||||||
|
sale, or importing the Program or any portion of it.
|
||||||
|
|
||||||
|
11. Patents.
|
||||||
|
|
||||||
|
A "contributor" is a copyright holder who authorizes use under this
|
||||||
|
License of the Program or a work on which the Program is based. The
|
||||||
|
work thus licensed is called the contributor's "contributor version".
|
||||||
|
|
||||||
|
A contributor's "essential patent claims" are all patent claims
|
||||||
|
owned or controlled by the contributor, whether already acquired or
|
||||||
|
hereafter acquired, that would be infringed by some manner, permitted
|
||||||
|
by this License, of making, using, or selling its contributor version,
|
||||||
|
but do not include claims that would be infringed only as a
|
||||||
|
consequence of further modification of the contributor version. For
|
||||||
|
purposes of this definition, "control" includes the right to grant
|
||||||
|
patent sublicenses in a manner consistent with the requirements of
|
||||||
|
this License.
|
||||||
|
|
||||||
|
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||||
|
patent license under the contributor's essential patent claims, to
|
||||||
|
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||||
|
propagate the contents of its contributor version.
|
||||||
|
|
||||||
|
In the following three paragraphs, a "patent license" is any express
|
||||||
|
agreement or commitment, however denominated, not to enforce a patent
|
||||||
|
(such as an express permission to practice a patent or covenant not to
|
||||||
|
sue for patent infringement). To "grant" such a patent license to a
|
||||||
|
party means to make such an agreement or commitment not to enforce a
|
||||||
|
patent against the party.
|
||||||
|
|
||||||
|
If you convey a covered work, knowingly relying on a patent license,
|
||||||
|
and the Corresponding Source of the work is not available for anyone
|
||||||
|
to copy, free of charge and under the terms of this License, through a
|
||||||
|
publicly available network server or other readily accessible means,
|
||||||
|
then you must either (1) cause the Corresponding Source to be so
|
||||||
|
available, or (2) arrange to deprive yourself of the benefit of the
|
||||||
|
patent license for this particular work, or (3) arrange, in a manner
|
||||||
|
consistent with the requirements of this License, to extend the patent
|
||||||
|
license to downstream recipients. "Knowingly relying" means you have
|
||||||
|
actual knowledge that, but for the patent license, your conveying the
|
||||||
|
covered work in a country, or your recipient's use of the covered work
|
||||||
|
in a country, would infringe one or more identifiable patents in that
|
||||||
|
country that you have reason to believe are valid.
|
||||||
|
|
||||||
|
If, pursuant to or in connection with a single transaction or
|
||||||
|
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||||
|
covered work, and grant a patent license to some of the parties
|
||||||
|
receiving the covered work authorizing them to use, propagate, modify
|
||||||
|
or convey a specific copy of the covered work, then the patent license
|
||||||
|
you grant is automatically extended to all recipients of the covered
|
||||||
|
work and works based on it.
|
||||||
|
|
||||||
|
A patent license is "discriminatory" if it does not include within
|
||||||
|
the scope of its coverage, prohibits the exercise of, or is
|
||||||
|
conditioned on the non-exercise of one or more of the rights that are
|
||||||
|
specifically granted under this License. You may not convey a covered
|
||||||
|
work if you are a party to an arrangement with a third party that is
|
||||||
|
in the business of distributing software, under which you make payment
|
||||||
|
to the third party based on the extent of your activity of conveying
|
||||||
|
the work, and under which the third party grants, to any of the
|
||||||
|
parties who would receive the covered work from you, a discriminatory
|
||||||
|
patent license (a) in connection with copies of the covered work
|
||||||
|
conveyed by you (or copies made from those copies), or (b) primarily
|
||||||
|
for and in connection with specific products or compilations that
|
||||||
|
contain the covered work, unless you entered into that arrangement,
|
||||||
|
or that patent license was granted, prior to 28 March 2007.
|
||||||
|
|
||||||
|
Nothing in this License shall be construed as excluding or limiting
|
||||||
|
any implied license or other defenses to infringement that may
|
||||||
|
otherwise be available to you under applicable patent law.
|
||||||
|
|
||||||
|
12. No Surrender of Others' Freedom.
|
||||||
|
|
||||||
|
If conditions are imposed on you (whether by court order, agreement or
|
||||||
|
otherwise) that contradict the conditions of this License, they do not
|
||||||
|
excuse you from the conditions of this License. If you cannot convey a
|
||||||
|
covered work so as to satisfy simultaneously your obligations under this
|
||||||
|
License and any other pertinent obligations, then as a consequence you may
|
||||||
|
not convey it at all. For example, if you agree to terms that obligate you
|
||||||
|
to collect a royalty for further conveying from those to whom you convey
|
||||||
|
the Program, the only way you could satisfy both those terms and this
|
||||||
|
License would be to refrain entirely from conveying the Program.
|
||||||
|
|
||||||
|
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, if you modify the
|
||||||
|
Program, your modified version must prominently offer all users
|
||||||
|
interacting with it remotely through a computer network (if your version
|
||||||
|
supports such interaction) an opportunity to receive the Corresponding
|
||||||
|
Source of your version by providing access to the Corresponding Source
|
||||||
|
from a network server at no charge, through some standard or customary
|
||||||
|
means of facilitating copying of software. This Corresponding Source
|
||||||
|
shall include the Corresponding Source for any work covered by version 3
|
||||||
|
of the GNU General Public License that is incorporated pursuant to the
|
||||||
|
following paragraph.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, you have
|
||||||
|
permission to link or combine any covered work with a work licensed
|
||||||
|
under version 3 of the GNU General Public License into a single
|
||||||
|
combined work, and to convey the resulting work. The terms of this
|
||||||
|
License will continue to apply to the part which is the covered work,
|
||||||
|
but the work with which it is combined will remain governed by version
|
||||||
|
3 of the GNU General Public License.
|
||||||
|
|
||||||
|
14. Revised Versions of this License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions of
|
||||||
|
the GNU Affero General Public License from time to time. Such new versions
|
||||||
|
will be similar in spirit to the present version, but may differ in detail to
|
||||||
|
address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the
|
||||||
|
Program specifies that a certain numbered version of the GNU Affero General
|
||||||
|
Public License "or any later version" applies to it, you have the
|
||||||
|
option of following the terms and conditions either of that numbered
|
||||||
|
version or of any later version published by the Free Software
|
||||||
|
Foundation. If the Program does not specify a version number of the
|
||||||
|
GNU Affero General Public License, you may choose any version ever published
|
||||||
|
by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Program specifies that a proxy can decide which future
|
||||||
|
versions of the GNU Affero General Public License can be used, that proxy's
|
||||||
|
public statement of acceptance of a version permanently authorizes you
|
||||||
|
to choose that version for the Program.
|
||||||
|
|
||||||
|
Later license versions may give you additional or different
|
||||||
|
permissions. However, no additional obligations are imposed on any
|
||||||
|
author or copyright holder as a result of your choosing to follow a
|
||||||
|
later version.
|
||||||
|
|
||||||
|
15. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||||
|
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||||
|
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||||
|
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||||
|
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||||
|
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
16. Limitation of Liability.
|
||||||
|
|
||||||
|
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||||
|
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||||
|
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||||
|
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||||
|
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||||
|
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||||
|
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||||
|
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGES.
|
||||||
|
|
||||||
|
17. Interpretation of Sections 15 and 16.
|
||||||
|
|
||||||
|
If the disclaimer of warranty and limitation of liability provided
|
||||||
|
above cannot be given local legal effect according to their terms,
|
||||||
|
reviewing courts shall apply local law that most closely approximates
|
||||||
|
an absolute waiver of all civil liability in connection with the
|
||||||
|
Program, unless a warranty or assumption of liability accompanies a
|
||||||
|
copy of the Program in return for a fee.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Programs
|
||||||
|
|
||||||
|
If you develop a new program, and you want it to be of the greatest
|
||||||
|
possible use to the public, the best way to achieve this is to make it
|
||||||
|
free software which everyone can redistribute and change under these terms.
|
||||||
|
|
||||||
|
To do so, attach the following notices to the program. It is safest
|
||||||
|
to attach them to the start of each source file to most effectively
|
||||||
|
state the exclusion of warranty; and each file should have at least
|
||||||
|
the "copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
<one line to give the program's name and a brief idea of what it does.>
|
||||||
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU Affero General Public License as published
|
||||||
|
by the Free Software Foundation, either version 3 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Affero General Public License
|
||||||
|
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
If your software can interact with users remotely through a computer
|
||||||
|
network, you should also make sure that it provides a way for users to
|
||||||
|
get its source. For example, if your program is a web application, its
|
||||||
|
interface could display a "Source" link that leads users to an archive
|
||||||
|
of the code. There are many ways you could offer source, and different
|
||||||
|
solutions will be better for different programs; see section 13 for the
|
||||||
|
specific requirements.
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or school,
|
||||||
|
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||||
|
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||||
|
<https://www.gnu.org/licenses/>.
|
|
@ -0,0 +1,92 @@
# SelfPrivacy GraphQL API which allows app to control your server

![CI status](https://ci.selfprivacy.org/api/badges/SelfPrivacy/selfprivacy-rest-api/status.svg)

## Build

```console
$ nix build
```

After a successful build, you get a `./result` symlink to a folder (in `/nix/store`) with the build contents.
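
For a quick check of what was built, list the contents behind the symlink:

```console
$ ls -l ./result/
```
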
## Develop

```console
$ nix develop
[SP devshell:/dir/selfprivacy-rest-api]$ python
Python 3.10.13 (main, Aug 24 2023, 12:59:26) [GCC 12.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
(ins)>>>
```

If you don't have experimental flakes enabled, you can use the following command:

```console
$ nix --extra-experimental-features nix-command --extra-experimental-features flakes develop
```

## Testing

Run the test suite by running coverage with pytest inside an ephemeral NixOS VM with the redis service enabled:

```console
$ nix flake check -L
```

Run the same test suite, but additionally create `./result/coverage.xml` in the current directory:

```console
$ nix build .#checks.x86_64-linux.default -L
```

Alternatively, just print the path to `/nix/store/...coverage.xml` without creating any files in the current directory:

```console
$ nix build .#checks.x86_64-linux.default -L --print-out-paths --no-link
```

Run the same test suite with arbitrary pytest options:

```console
$ pytest-vm.sh # specify pytest options here, e.g. `--last-failed`
```

When running via the script, the pytest cache is preserved between runs in the `.pytest_cache` folder.
NixOS VM state temporarily resides in `${TMPDIR:=/tmp}/nixos-vm-tmp-dir/vm-state-machine` during the test.
The Git workdir is shared read-write with the VM via the `.nixos-vm-tmp-dir/shared-xchg` symlink. The VM accesses the workdir contents via the `/tmp/shared` mount point and the `/root/source` symlink.
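
If you ever need a clean slate between runs, you can remove the saved VM state manually; a minimal sketch, assuming the default `TMPDIR` of `/tmp` mentioned above (adjust the path if you override `TMPDIR`):

```console
$ rm -rf /tmp/nixos-vm-tmp-dir/vm-state-machine
```
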
Launch the VM and execute commands manually, either in the Linux console (user `root`) or using the Python NixOS test driver API (refer to the [NixOS documentation](https://nixos.org/manual/nixos/stable/#ssec-machine-objects)):

```console
$ nix run .#checks.x86_64-linux.default.driverInteractive
```

You can add `--keep-vm-state` in order to keep VM state between runs:

```console
$ TMPDIR=".nixos-vm-tmp-dir" nix run .#checks.x86_64-linux.default.driverInteractive --keep-vm-state
```

The `-L`/`--print-build-logs` option is optional for all nix commands. It tells nix to print each log line one after another instead of overwriting a single one.

## Dependencies and Dependent Modules

This flake depends on a single Nix flake input: the nixpkgs repository. nixpkgs provides all software packages used to build and run the API service, its tests, etc.

To synchronize the nixpkgs input with the one used by the selfprivacy-nixos-config repository, use this command:

```console
$ nix flake lock --override-input nixpkgs nixpkgs --inputs-from git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=BRANCH
```

Replace BRANCH with the branch name of the selfprivacy-nixos-config repository you want to sync with. During development, a nixpkgs input update might be required in both the selfprivacy-rest-api and selfprivacy-nixos-config repositories simultaneously, so a new feature branch might be used temporarily until selfprivacy-nixos-config gets the feature branch merged.

Show current flake inputs (e.g. nixpkgs):

```console
$ nix flake metadata
```

Show selfprivacy-nixos-config Nix flake inputs (including nixpkgs):

```console
$ nix flake metadata git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=BRANCH
```

The Nix code for the NixOS service module for the API is located in the NixOS configuration repository.

## Troubleshooting

Sometimes commands inside `nix develop` refuse to work properly if the calling shell lacks the `LANG` environment variable. Try setting it before entering `nix develop`.
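
A minimal workaround sketch (assuming a UTF-8 locale such as `C.UTF-8` is available on your system):

```console
$ LANG=C.UTF-8 nix develop
```
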
@ -0,0 +1,29 @@
{ pythonPackages, rev ? "local" }:

pythonPackages.buildPythonPackage rec {
  pname = "selfprivacy-graphql-api";
  version = rev;
  src = builtins.filterSource (p: t: p != ".git" && t != "symlink") ./.;
  propagatedBuildInputs = with pythonPackages; [
    fastapi
    gevent
    huey
    mnemonic
    portalocker
    psutil
    pydantic
    pytz
    redis
    setuptools
    strawberry-graphql
    typing-extensions
    uvicorn
  ];
  pythonImportsCheck = [ "selfprivacy_api" ];
  doCheck = false;
  meta = {
    description = ''
      SelfPrivacy Server Management API
    '';
  };
}

@ -0,0 +1,26 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1709677081,
        "narHash": "sha256-tix36Y7u0rkn6mTm0lA45b45oab2cFLqAzDbJxeXS+c=",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "880992dcc006a5e00dd0591446fdf723e6a51a64",
        "type": "github"
      },
      "original": {
        "owner": "nixos",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}

@ -0,0 +1,162 @@
{
  description = "SelfPrivacy API flake";

  inputs.nixpkgs.url = "github:nixos/nixpkgs";

  outputs = { self, nixpkgs, ... }:
    let
      system = "x86_64-linux";
      pkgs = nixpkgs.legacyPackages.${system};
      selfprivacy-graphql-api = pkgs.callPackage ./default.nix {
        pythonPackages = pkgs.python310Packages;
        rev = self.shortRev or self.dirtyShortRev or "dirty";
      };
      python = self.packages.${system}.default.pythonModule;
      python-env =
        python.withPackages (ps:
          self.packages.${system}.default.propagatedBuildInputs ++ (with ps; [
            coverage
            pytest
            pytest-datadir
            pytest-mock
            pytest-subprocess
            black
            mypy
            pylsp-mypy
            python-lsp-black
            python-lsp-server
            pyflakes
            typer # for strawberry
            types-redis # for mypy
          ] ++ strawberry-graphql.optional-dependencies.cli));

      vmtest-src-dir = "/root/source";
      shellMOTD = ''
        Welcome to SP API development shell!

        [formatters]

          black
          nixpkgs-fmt

        [testing in NixOS VM]

          nixos-test-driver - run an interactive NixOS VM with all dependencies included and 2 disk volumes
          pytest-vm - run pytest in an ephemeral NixOS VM with Redis, accepting pytest arguments
      '';
    in
    {
      # see https://github.com/NixOS/nixpkgs/blob/66a9817cec77098cfdcbb9ad82dbb92651987a84/nixos/lib/test-driver/test_driver/machine.py#L359
      packages.${system} = {
        default = selfprivacy-graphql-api;
        pytest-vm = pkgs.writeShellScriptBin "pytest-vm" ''
          set -o errexit
          set -o nounset
          set -o xtrace

          # see https://github.com/NixOS/nixpkgs/blob/66a9817cec77098cfdcbb9ad82dbb92651987a84/nixos/lib/test-driver/test_driver/machine.py#L359
          export TMPDIR=''${TMPDIR:=/tmp}/nixos-vm-tmp-dir
          readonly NIXOS_VM_SHARED_DIR_HOST="$TMPDIR/shared-xchg"
          readonly NIXOS_VM_SHARED_DIR_GUEST="/tmp/shared"

          mkdir -p "$TMPDIR"
          ln -sfv "$PWD" -T "$NIXOS_VM_SHARED_DIR_HOST"

          SCRIPT=$(cat <<EOF
          start_all()
          machine.succeed("ln -sf $NIXOS_VM_SHARED_DIR_GUEST -T ${vmtest-src-dir} >&2")
          machine.succeed("cd ${vmtest-src-dir} && coverage run -m pytest -v $@ >&2")
          machine.succeed("cd ${vmtest-src-dir} && coverage report >&2")
          EOF
          )

          if [ -f "/etc/arch-release" ]; then
            ${self.checks.${system}.default.driverInteractive}/bin/nixos-test-driver --no-interactive <(printf "%s" "$SCRIPT")
          else
            ${self.checks.${system}.default.driver}/bin/nixos-test-driver -- <(printf "%s" "$SCRIPT")
          fi
        '';
      };
      nixosModules.default =
        import ./nixos/module.nix self.packages.${system}.default;
      devShells.${system}.default = pkgs.mkShellNoCC {
        name = "SP API dev shell";
        packages = with pkgs; [
          nixpkgs-fmt
          rclone
          redis
          restic
          self.packages.${system}.pytest-vm
          # FIXME consider loading this explicitly only after ArchLinux issue is solved
          self.checks.x86_64-linux.default.driverInteractive
          # the target API application python environment
          python-env
        ];
        shellHook = ''
          # envs set with export and as attributes are treated differently.
          # for example, printenv <Name> will not fetch the value of an attribute.
          export TEST_MODE="true"

          # more tips for bash-completion to work on non-NixOS:
          # https://discourse.nixos.org/t/whats-the-nix-way-of-bash-completion-for-packages/20209/16?u=alexoundos
          # Load installed profiles
          for file in "/etc/profile.d/"*.sh; do
            # If that folder doesn't exist, bash loves to return the whole glob
            [[ -f "$file" ]] && source "$file"
          done

          printf "%s" "${shellMOTD}"
        '';
      };
      checks.${system} = {
        fmt-check = pkgs.runCommandLocal "sp-api-fmt-check"
          { nativeBuildInputs = [ pkgs.black ]; }
          "black --check ${self.outPath} > $out";
        default =
          pkgs.testers.runNixOSTest {
            name = "default";
            nodes.machine = { lib, pkgs, ... }: {
              # 2 additional disks (1024 MiB and 200 MiB) with empty ext4 FS
              virtualisation.emptyDiskImages = [ 1024 200 ];
              virtualisation.fileSystems."/volumes/vdb" = {
                autoFormat = true;
                device = "/dev/vdb"; # this name is chosen by QEMU, not here
                fsType = "ext4";
                noCheck = true;
              };
              virtualisation.fileSystems."/volumes/vdc" = {
                autoFormat = true;
                device = "/dev/vdc"; # this name is chosen by QEMU, not here
                fsType = "ext4";
                noCheck = true;
              };
              boot.consoleLogLevel = lib.mkForce 3;
              documentation.enable = false;
              services.journald.extraConfig = lib.mkForce "";
              services.redis.servers.sp-api = {
                enable = true;
                save = [ ];
                settings.notify-keyspace-events = "KEA";
              };
              environment.systemPackages = with pkgs; [
                python-env
                # TODO: these can be passed via wrapper script around app
                rclone
                restic
              ];
              environment.variables.TEST_MODE = "true";
              systemd.tmpfiles.settings.src.${vmtest-src-dir}.L.argument =
                self.outPath;
            };
            testScript = ''
              start_all()
              machine.succeed("cd ${vmtest-src-dir} && coverage run --data-file=/tmp/.coverage -m pytest -p no:cacheprovider -v >&2")
              machine.succeed("coverage xml --rcfile=${vmtest-src-dir}/.coveragerc --data-file=/tmp/.coverage >&2")
              machine.copy_from_vm("coverage.xml", ".")
              machine.succeed("coverage report >&2")
            '';
          };
      };
    };
  nixConfig.bash-prompt = ''\n\[\e[1;32m\][\[\e[0m\]\[\e[1;34m\]SP devshell\[\e[0m\]\[\e[1;32m\]:\w]\$\[\[\e[0m\] '';
}

main.py
@ -1,81 +0,0 @@
#!/usr/bin/env python3
from flask import Flask, jsonify, request, json
from flask_restful import Resource, Api, reqparse
import base64
import pandas as pd
import ast
import subprocess
import os

app = Flask(__name__)
api = Api(app)

@app.route("/systemVersion", methods=["GET"])
def uname():
    uname = subprocess.check_output(["uname", "-arm"])
    return jsonify(uname)

@app.route("/getDKIM", methods=["GET"])
def getDkimKey():
    with open("/var/domain") as domainFile:
        domain = domainFile.readline()
        domain = domain.rstrip("\n")
    catProcess = subprocess.Popen(["cat", "/var/dkim/" + domain + ".selector.txt"], stdout=subprocess.PIPE)
    dkim = catProcess.communicate()[0]
    dkim = base64.b64encode(dkim)
    dkim = str(dkim, 'utf-8')
    print(dkim)
    response = app.response_class(
        response=json.dumps(dkim),
        status=200,
        mimetype='application/json'
    )
    return response

@app.route("/pythonVersion", methods=["GET"])
def getPythonVersion():
    pythonVersion = subprocess.check_output(["python","--version"])
    return jsonify(pythonVersion)

@app.route("/apply", methods=["GET"])
def rebuildSystem():
    rebuildResult = subprocess.Popen(["nixos-rebuild","switch"])
    rebuildResult.communicate()[0]
    return jsonify(rebuildResult.returncode)

@app.route("/rollback", methods=["GET"])
def rollbackSystem():
    rollbackResult = subprocess.Popen(["nixos-rebuild","switch","--rollback"])
    rollbackResult.communicate()[0]
    return jsonify(rollbackResult.returncode)

@app.route("/upgrade", methods=["GET"])
def upgradeSystem():
    upgradeResult = subprocess.Popen(["nixos-rebuild","switch","--upgrade"])
    upgradeResult.communicate()[0]
    return jsonify(upgradeResult.returncode)

@app.route("/createUser", methods=["GET"])
def createUser():
    user = subprocess.Popen(["useradd","-m",request.headers.get("X-User")])
    user.communicate()[0]
    return jsonify(user.returncode)

@app.route("/deleteUser", methods=["DELETE"])
def deleteUser():
    user = subprocess.Popen(["userdel",request.headers.get("X-User")])
    user.communicate()[0]
    return jsonify(user.returncode)

@app.route("/serviceStatus", methods=["GET"])
def getServiceStatus():
    imapService = subprocess.Popen(["systemctl", "status", "dovecot2.service"])
    imapService.communicate()[0]
    smtpService = subprocess.Popen(["systemctl", "status", "postfix.service"])
    smtpService.communicate()[0]
    httpService = subprocess.Popen(["systemctl", "status", "nginx.service"])
    httpService.communicate()[0]
    return jsonify(
        imap=imapService.returncode,
        smtp=smtpService.returncode,
        http=httpService.returncode
    )

@app.route("/decryptDisk", methods=["POST"])
def requestDiskDecryption():
    decryptionService = subprocess.Popen(["echo", "-n", request.headers['X-Decryption-Key'], "|", "cryptsetup", "luksOpen", "/dev/sdb", "decryptedVar"], stdout=subprocess.PIPE, shell=False)
    decryptionService.communicate()[0]
    return jsonify(
        status=decryptionService.returncode
    )

if __name__ == '__main__':
    app.run(port=5050, debug=False)

@ -0,0 +1,22 @@
@startuml

left to right direction

title repositories and flake inputs relations diagram

cloud nixpkgs as nixpkgs_transit
control "<font:monospaced><size:15>nixos-rebuild" as nixos_rebuild
component "SelfPrivacy\nAPI app" as selfprivacy_app
component "SelfPrivacy\nNixOS configuration" as nixos_configuration

note top of nixos_configuration : SelfPrivacy\nAPI service module

nixos_configuration ).. nixpkgs_transit
nixpkgs_transit ..> selfprivacy_app
selfprivacy_app --> nixos_configuration
[nixpkgs] --> nixos_configuration
nixos_configuration -> nixos_rebuild

footer %date("yyyy-MM-dd'T'HH:mmZ")

@enduml

@ -0,0 +1,166 @@
selfprivacy-graphql-api: { config, lib, pkgs, ... }:

let
  cfg = config.services.selfprivacy-api;
  config-id = "default";
  nixos-rebuild = "${config.system.build.nixos-rebuild}/bin/nixos-rebuild";
  nix = "${config.nix.package.out}/bin/nix";
in
{
  options.services.selfprivacy-api = {
    enable = lib.mkOption {
      default = true;
      type = lib.types.bool;
      description = ''
        Enable SelfPrivacy API service
      '';
    };
  };
  config = lib.mkIf cfg.enable {
    users.users."selfprivacy-api" = {
      isNormalUser = false;
      isSystemUser = true;
      extraGroups = [ "opendkim" ];
      group = "selfprivacy-api";
    };
    users.groups."selfprivacy-api".members = [ "selfprivacy-api" ];

    systemd.services.selfprivacy-api = {
      description = "API Server used to control system from the mobile application";
      environment = config.nix.envVars // {
        HOME = "/root";
        PYTHONUNBUFFERED = "1";
      } // config.networking.proxy.envVars;
      path = [
        "/var/"
        "/var/dkim/"
        pkgs.coreutils
        pkgs.gnutar
        pkgs.xz.bin
        pkgs.gzip
        pkgs.gitMinimal
        config.nix.package.out
        pkgs.restic
        pkgs.mkpasswd
        pkgs.util-linux
        pkgs.e2fsprogs
        pkgs.iproute2
      ];
      after = [ "network-online.target" ];
      wantedBy = [ "network-online.target" ];
      serviceConfig = {
        User = "root";
        ExecStart = "${selfprivacy-graphql-api}/bin/app.py";
        Restart = "always";
        RestartSec = "5";
      };
    };
    systemd.services.selfprivacy-api-worker = {
      description = "Task worker for SelfPrivacy API";
      environment = config.nix.envVars // {
        HOME = "/root";
        PYTHONUNBUFFERED = "1";
        PYTHONPATH =
          pkgs.python310Packages.makePythonPath [ selfprivacy-graphql-api ];
      } // config.networking.proxy.envVars;
      path = [
        "/var/"
        "/var/dkim/"
        pkgs.coreutils
        pkgs.gnutar
        pkgs.xz.bin
        pkgs.gzip
        pkgs.gitMinimal
        config.nix.package.out
        pkgs.restic
        pkgs.mkpasswd
        pkgs.util-linux
        pkgs.e2fsprogs
        pkgs.iproute2
      ];
      after = [ "network-online.target" ];
      wantedBy = [ "network-online.target" ];
      serviceConfig = {
        User = "root";
        ExecStart = "${pkgs.python310Packages.huey}/bin/huey_consumer.py selfprivacy_api.task_registry.huey";
        Restart = "always";
        RestartSec = "5";
      };
    };
    # One shot systemd service to rebuild NixOS using nixos-rebuild
    systemd.services.sp-nixos-rebuild = {
      description = "nixos-rebuild switch";
      environment = config.nix.envVars // {
        HOME = "/root";
      } // config.networking.proxy.envVars;
      # TODO figure out how to get dependencies list reliably
      path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
      # TODO set proper timeout for reboot instead of service restart
      serviceConfig = {
        User = "root";
        WorkingDirectory = "/etc/nixos";
        # sync top-level flake with sp-modules sub-flake
        # (https://github.com/NixOS/nix/issues/9339)
        ExecStartPre = ''
          ${nix} flake lock --override-input sp-modules path:./sp-modules
        '';
        ExecStart = ''
          ${nixos-rebuild} switch --flake .#${config-id}
        '';
        KillMode = "none";
        SendSIGKILL = "no";
      };
      restartIfChanged = false;
      unitConfig.X-StopOnRemoval = false;
    };
    # One shot systemd service to upgrade NixOS using nixos-rebuild
    systemd.services.sp-nixos-upgrade = {
      # protection against simultaneous runs
      after = [ "sp-nixos-rebuild.service" ];
      description = "Upgrade NixOS and SP modules to latest versions";
      environment = config.nix.envVars // {
        HOME = "/root";
      } // config.networking.proxy.envVars;
      # TODO figure out how to get dependencies list reliably
      path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
      serviceConfig = {
        User = "root";
        WorkingDirectory = "/etc/nixos";
        # TODO get URL from systemd template parameter?
        ExecStartPre = ''
          ${nix} flake update \
          --override-input selfprivacy-nixos-config git+https://git.selfprivacy.org/SelfPrivacy/selfprivacy-nixos-config.git?ref=flakes
        '';
        ExecStart = ''
          ${nixos-rebuild} switch --flake .#${config-id}
        '';
        KillMode = "none";
        SendSIGKILL = "no";
      };
      restartIfChanged = false;
      unitConfig.X-StopOnRemoval = false;
    };
    # One shot systemd service to rollback NixOS using nixos-rebuild
    systemd.services.sp-nixos-rollback = {
      # protection against simultaneous runs
      after = [ "sp-nixos-rebuild.service" "sp-nixos-upgrade.service" ];
      description = "Rollback NixOS using nixos-rebuild";
      environment = config.nix.envVars // {
        HOME = "/root";
      } // config.networking.proxy.envVars;
      # TODO figure out how to get dependencies list reliably
      path = [ pkgs.coreutils pkgs.gnutar pkgs.xz.bin pkgs.gzip pkgs.gitMinimal config.nix.package.out ];
      serviceConfig = {
        User = "root";
        WorkingDirectory = "/etc/nixos";
        ExecStart = ''
          ${nixos-rebuild} switch --rollback --flake .#${config-id}
        '';
        KillMode = "none";
        SendSIGKILL = "no";
      };
      restartIfChanged = false;
      unitConfig.X-StopOnRemoval = false;
    };
  };
}

@ -0,0 +1,3 @@
[build-system]
requires = ["setuptools", "wheel", "portalocker"]
build-backend = "setuptools.build_meta"

@ -1,6 +0,0 @@
flask
flask_restful
pandas
ast
subprocess
os

@ -0,0 +1,181 @@
"""
App tokens actions.
The only actions on tokens that are accessible from APIs
"""
from datetime import datetime, timezone
from typing import Optional
from pydantic import BaseModel
from mnemonic import Mnemonic

from selfprivacy_api.utils.timeutils import ensure_tz_aware, ensure_tz_aware_strict
from selfprivacy_api.repositories.tokens.redis_tokens_repository import (
    RedisTokensRepository,
)
from selfprivacy_api.repositories.tokens.exceptions import (
    TokenNotFound,
    RecoveryKeyNotFound,
    InvalidMnemonic,
    NewDeviceKeyNotFound,
)

TOKEN_REPO = RedisTokensRepository()


class TokenInfoWithIsCaller(BaseModel):
    """Token info"""

    name: str
    date: datetime
    is_caller: bool


def _naive(date_time: datetime) -> datetime:
    if date_time is None:
        return None
    if date_time.tzinfo is not None:
        # Normalize to UTC before dropping tzinfo; the original discarded
        # the astimezone() result, which was a no-op.
        date_time = date_time.astimezone(timezone.utc)
    return date_time.replace(tzinfo=None)


def get_api_tokens_with_caller_flag(caller_token: str) -> list[TokenInfoWithIsCaller]:
    """Get the tokens info"""
    caller_name = TOKEN_REPO.get_token_by_token_string(caller_token).device_name
    tokens = TOKEN_REPO.get_tokens()
    return [
        TokenInfoWithIsCaller(
            name=token.device_name,
            date=token.created_at,
            is_caller=token.device_name == caller_name,
        )
        for token in tokens
    ]


def is_token_valid(token) -> bool:
    """Check if the token is valid"""
    return TOKEN_REPO.is_token_valid(token)


class NotFoundException(Exception):
    """Not found exception"""


class CannotDeleteCallerException(Exception):
    """Cannot delete caller exception"""


def delete_api_token(caller_token: str, token_name: str) -> None:
    """Delete the token"""
    if TOKEN_REPO.is_token_name_pair_valid(token_name, caller_token):
        raise CannotDeleteCallerException("Cannot delete caller's token")
    if not TOKEN_REPO.is_token_name_exists(token_name):
        raise NotFoundException("Token not found")
    token = TOKEN_REPO.get_token_by_name(token_name)
    TOKEN_REPO.delete_token(token)


def refresh_api_token(caller_token: str) -> str:
    """Refresh the token"""
    try:
        old_token = TOKEN_REPO.get_token_by_token_string(caller_token)
        new_token = TOKEN_REPO.refresh_token(old_token)
    except TokenNotFound:
        raise NotFoundException("Token not found")
    return new_token.token


class RecoveryTokenStatus(BaseModel):
    """Recovery token status"""

    exists: bool
    valid: bool
    date: Optional[datetime] = None
    expiration: Optional[datetime] = None
    uses_left: Optional[int] = None


def get_api_recovery_token_status() -> RecoveryTokenStatus:
    """Get the recovery token status, timezone-aware"""
    token = TOKEN_REPO.get_recovery_key()
    if token is None:
        return RecoveryTokenStatus(exists=False, valid=False)
    is_valid = TOKEN_REPO.is_recovery_key_valid()

    # New tokens are tz-aware, but older ones might not be
    expiry_date = token.expires_at
    if expiry_date is not None:
        expiry_date = ensure_tz_aware_strict(expiry_date)

    return RecoveryTokenStatus(
        exists=True,
        valid=is_valid,
        date=ensure_tz_aware_strict(token.created_at),
        expiration=expiry_date,
        uses_left=token.uses_left,
    )


class InvalidExpirationDate(Exception):
    """Invalid expiration date exception"""


class InvalidUsesLeft(Exception):
    """Invalid uses left exception"""


def get_new_api_recovery_key(
    expiration_date: Optional[datetime] = None, uses_left: Optional[int] = None
) -> str:
    """Get a new recovery key"""
    if expiration_date is not None:
        expiration_date = ensure_tz_aware(expiration_date)
        current_time = datetime.now(timezone.utc)
        if expiration_date < current_time:
            raise InvalidExpirationDate("Expiration date is in the past")
    if uses_left is not None:
        if uses_left <= 0:
            raise InvalidUsesLeft("Uses must be greater than 0")

    key = TOKEN_REPO.create_recovery_key(expiration_date, uses_left)
    mnemonic_phrase = Mnemonic(language="english").to_mnemonic(bytes.fromhex(key.key))
    return mnemonic_phrase


def use_mnemonic_recovery_token(mnemonic_phrase, name):
    """Use the recovery token by converting the mnemonic word list to a byte array.
    If the recovery token itself is invalid, return None.
    If the binary representation of the phrase does not match
    the byte array of the recovery token, return None.
    If the mnemonic phrase is valid, generate a device token and return it.
    Subtract 1 from uses_left if it exists.
    mnemonic_phrase is a string representation of the mnemonic word list.
    """
    try:
        token = TOKEN_REPO.use_mnemonic_recovery_key(mnemonic_phrase, name)
        return token.token
    except (RecoveryKeyNotFound, InvalidMnemonic):
        return None


def delete_new_device_auth_token() -> None:
    TOKEN_REPO.delete_new_device_key()


def get_new_device_auth_token() -> str:
    """Generate and store a new device auth token which is valid for 10 minutes
    and return a mnemonic phrase representation
    """
    key = TOKEN_REPO.get_new_device_key()
    return Mnemonic(language="english").to_mnemonic(bytes.fromhex(key.key))


def use_new_device_auth_token(mnemonic_phrase, name) -> Optional[str]:
    """Use the new device auth token by converting the mnemonic string to a byte array.
    If the mnemonic phrase is valid then generate a device token and return it.
    The new device auth token must be deleted.
    """
    try:
        token = TOKEN_REPO.use_mnemonic_new_device_key(mnemonic_phrase, name)
        return token.token
    except (NewDeviceKeyNotFound, InvalidMnemonic):
        return None

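For context, a minimal usage sketch of the recovery-key flow these actions implement; the device name and durations are made-up illustration values, not part of the module:

from datetime import datetime, timedelta, timezone

from selfprivacy_api.actions.api_tokens import (
    get_new_api_recovery_key,
    use_mnemonic_recovery_token,
)

# Issue a recovery key valid for 30 days and 3 uses (hypothetical policy).
phrase = get_new_api_recovery_key(
    expiration_date=datetime.now(timezone.utc) + timedelta(days=30),
    uses_left=3,
)

# Later, on a new device, exchange the mnemonic phrase for a device token.
device_token = use_mnemonic_recovery_token(phrase, "my-laptop")
if device_token is None:
    print("Recovery key invalid, expired, or exhausted")
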
@ -0,0 +1,34 @@
from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.jobs import Jobs, Job

from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.services.tasks import move_service as move_service_task


class ServiceNotFoundError(Exception):
    pass


class VolumeNotFoundError(Exception):
    pass


def move_service(service_id: str, volume_name: str) -> Job:
    service = get_service_by_id(service_id)
    if service is None:
        raise ServiceNotFoundError(f"No such service: {service_id}")

    volume = BlockDevices().get_block_device(volume_name)
    if volume is None:
        raise VolumeNotFoundError(f"No such volume: {volume_name}")

    service.assert_can_move(volume)

    job = Jobs.add(
        type_id=f"services.{service.get_id()}.move",
        name=f"Move {service.get_display_name()}",
        description=f"Moving {service.get_display_name()} data to {volume.name}",
    )

    move_service_task(service, volume, job)
    return job

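A hedged usage sketch of the action above; the service id and volume name are illustrative, not taken from the diff:

# Start moving Nextcloud's data to another block device; the actual move
# happens asynchronously in move_service_task, so we get a Job back to poll.
job = move_service("nextcloud", "sda2")
print(job.status)
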
@ -0,0 +1,136 @@
"""Actions to manage SSH."""
from typing import Optional
from pydantic import BaseModel
from selfprivacy_api.actions.users import (
    UserNotFound,
    ensure_ssh_and_users_fields_exist,
)

from selfprivacy_api.utils import WriteUserData, ReadUserData, validate_ssh_public_key


def enable_ssh():
    with WriteUserData() as data:
        if "ssh" not in data:
            data["ssh"] = {}
        data["ssh"]["enable"] = True


class UserdataSshSettings(BaseModel):
    """Settings for SSH."""

    enable: bool = True
    passwordAuthentication: bool = True
    rootKeys: list[str] = []


def get_ssh_settings() -> UserdataSshSettings:
    with ReadUserData() as data:
        if "ssh" not in data:
            return UserdataSshSettings()
        if "enable" not in data["ssh"]:
            data["ssh"]["enable"] = True
        if "passwordAuthentication" not in data["ssh"]:
            data["ssh"]["passwordAuthentication"] = False
        if "rootKeys" not in data["ssh"]:
            data["ssh"]["rootKeys"] = []
        return UserdataSshSettings(**data["ssh"])


def set_ssh_settings(
    enable: Optional[bool] = None, password_authentication: Optional[bool] = None
) -> None:
    with WriteUserData() as data:
        if "ssh" not in data:
            data["ssh"] = {}
        if enable is not None:
            data["ssh"]["enable"] = enable
        if password_authentication is not None:
            data["ssh"]["passwordAuthentication"] = password_authentication


class KeyAlreadyExists(Exception):
    """Key already exists"""

    pass


class InvalidPublicKey(Exception):
    """Invalid public key"""

    pass


def create_ssh_key(username: str, ssh_key: str):
    """Create a new ssh key"""

    if not validate_ssh_public_key(ssh_key):
        raise InvalidPublicKey()

    with WriteUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == data["username"]:
            if ssh_key in data["sshKeys"]:
                raise KeyAlreadyExists()

            data["sshKeys"].append(ssh_key)
            return

        if username == "root":
            if ssh_key in data["ssh"]["rootKeys"]:
                raise KeyAlreadyExists()

            data["ssh"]["rootKeys"].append(ssh_key)
            return

        for user in data["users"]:
            if user["username"] == username:
                if "sshKeys" not in user:
                    user["sshKeys"] = []
                if ssh_key in user["sshKeys"]:
                    raise KeyAlreadyExists()

                user["sshKeys"].append(ssh_key)
                return

        raise UserNotFound()


class KeyNotFound(Exception):
    """Key not found"""

    pass


def remove_ssh_key(username: str, ssh_key: str):
    """Delete an ssh key"""

    with WriteUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == "root":
            if ssh_key in data["ssh"]["rootKeys"]:
                data["ssh"]["rootKeys"].remove(ssh_key)
                return

            raise KeyNotFound()

        if username == data["username"]:
            if ssh_key in data["sshKeys"]:
                data["sshKeys"].remove(ssh_key)
                return

            raise KeyNotFound()

        for user in data["users"]:
            if user["username"] == username:
                if "sshKeys" not in user:
                    user["sshKeys"] = []
                if ssh_key in user["sshKeys"]:
                    user["sshKeys"].remove(ssh_key)
                    return

                raise KeyNotFound()

        raise UserNotFound()

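A short sketch of the add/remove round-trip; the username and key string are placeholders (a real call must pass a key that validate_ssh_public_key accepts):

PUBKEY = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... user@host"  # placeholder, not a real key

try:
    create_ssh_key("admin", PUBKEY)
except InvalidPublicKey:
    print("substitute a real public key for the placeholder")
except KeyAlreadyExists:
    pass  # idempotent enough for a demo

remove_ssh_key("admin", PUBKEY)  # raises KeyNotFound if it was never added
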
@ -0,0 +1,173 @@
"""Actions to manage the system."""
import os
import subprocess
import pytz
from typing import Optional, List
from pydantic import BaseModel
from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.jobs.upgrade_system import rebuild_system_task

from selfprivacy_api.utils import WriteUserData, ReadUserData


def get_timezone() -> str:
    """Get the timezone of the server"""
    with ReadUserData() as user_data:
        if "timezone" in user_data:
            return user_data["timezone"]
        return "Etc/UTC"


class InvalidTimezone(Exception):
    """Invalid timezone"""

    pass


def change_timezone(timezone: str) -> None:
    """Change the timezone of the server"""
    if timezone not in pytz.all_timezones:
        raise InvalidTimezone(f"Invalid timezone: {timezone}")
    with WriteUserData() as user_data:
        user_data["timezone"] = timezone


class UserDataAutoUpgradeSettings(BaseModel):
    """Settings for auto-upgrading user data"""

    enable: bool = True
    allowReboot: bool = False


def get_auto_upgrade_settings() -> UserDataAutoUpgradeSettings:
    """Get the auto-upgrade settings"""
    with ReadUserData() as user_data:
        if "autoUpgrade" in user_data:
            return UserDataAutoUpgradeSettings(**user_data["autoUpgrade"])
        return UserDataAutoUpgradeSettings()


def set_auto_upgrade_settings(
    enable: Optional[bool] = None, allowReboot: Optional[bool] = None
) -> None:
    """Set the auto-upgrade settings"""
    with WriteUserData() as user_data:
        if "autoUpgrade" not in user_data:
            user_data["autoUpgrade"] = {}
        if enable is not None:
            user_data["autoUpgrade"]["enable"] = enable
        if allowReboot is not None:
            user_data["autoUpgrade"]["allowReboot"] = allowReboot


class ShellException(Exception):
    """Something went wrong when calling another process"""

    pass


def run_blocking(cmd: List[str], new_session: bool = False) -> str:
    """Run a process, block until done, return output, complain if failed"""
    process_handle = subprocess.Popen(
        cmd,
        shell=False,
        start_new_session=new_session,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_raw, stderr_raw = process_handle.communicate()
    stdout = stdout_raw.decode("utf-8")
    if stderr_raw is not None:
        stderr = stderr_raw.decode("utf-8")
    else:
        stderr = ""
    output = stdout + "\n" + stderr
    if process_handle.returncode != 0:
        raise ShellException(
            f"Shell command failed, command array: {cmd}, output: {output}"
        )
    return stdout


def rebuild_system() -> Job:
    """Rebuild the system"""
    job = Jobs.add(
        type_id="system.nixos.rebuild",
        name="Rebuild system",
        description="Applying the new system configuration by building the new NixOS generation.",
        status=JobStatus.CREATED,
    )
    rebuild_system_task(job)
    return job


def rollback_system() -> int:
    """Roll back the system"""
    run_blocking(["systemctl", "start", "sp-nixos-rollback.service"], new_session=True)
    return 0


def upgrade_system() -> Job:
    """Upgrade the system"""
    job = Jobs.add(
        type_id="system.nixos.upgrade",
        name="Upgrade system",
        description="Upgrading the system to the latest version.",
        status=JobStatus.CREATED,
    )
    rebuild_system_task(job, upgrade=True)
    return job


def reboot_system() -> None:
    """Reboot the system"""
    run_blocking(["reboot"], new_session=True)


def get_system_version() -> str:
    """Get system version"""
    return subprocess.check_output(["uname", "-a"]).decode("utf-8").strip()


def get_python_version() -> str:
    """Get Python version"""
    return subprocess.check_output(["python", "-V"]).decode("utf-8").strip()


class SystemActionResult(BaseModel):
    """System action result"""

    status: int
    message: str
    data: str


def pull_repository_changes() -> SystemActionResult:
    """Pull repository changes"""
    git_pull_command = ["git", "pull"]

    current_working_directory = os.getcwd()
    os.chdir("/etc/nixos")

    git_pull_process_descriptor = subprocess.Popen(
        git_pull_command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=False,
    )

    data = git_pull_process_descriptor.communicate()[0].decode("utf-8")

    os.chdir(current_working_directory)

    if git_pull_process_descriptor.returncode == 0:
        return SystemActionResult(
            status=0,
            message="Pulled repository changes",
            data=data,
        )
    return SystemActionResult(
        status=git_pull_process_descriptor.returncode,
        message="Failed to pull repository changes",
        data=data,
    )

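A minimal sketch of how run_blocking reports failure; the command itself is arbitrary:

try:
    kernel = run_blocking(["uname", "-a"])
    print(kernel.strip())
except ShellException as error:
    # Raised on any non-zero exit code, carrying the command and its output.
    print(f"command failed: {error}")
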
@ -0,0 +1,229 @@
"""Actions to manage the users."""
import re
from typing import Optional
from pydantic import BaseModel
from enum import Enum
from selfprivacy_api.utils import (
    ReadUserData,
    WriteUserData,
    hash_password,
    is_username_forbidden,
)


class UserDataUserOrigin(Enum):
    """Origin of the user in the user data"""

    NORMAL = "NORMAL"
    PRIMARY = "PRIMARY"
    ROOT = "ROOT"


class UserDataUser(BaseModel):
    """The user model from the userdata file"""

    username: str
    ssh_keys: list[str]
    origin: UserDataUserOrigin


def ensure_ssh_and_users_fields_exist(data):
    if "ssh" not in data:
        data["ssh"] = {}
        data["ssh"]["rootKeys"] = []

    elif data["ssh"].get("rootKeys") is None:
        data["ssh"]["rootKeys"] = []

    if "sshKeys" not in data:
        data["sshKeys"] = []

    if "users" not in data:
        data["users"] = []


def get_users(
    exclude_primary: bool = False,
    exclude_root: bool = False,
) -> list[UserDataUser]:
    """Get the list of users"""
    users = []
    with ReadUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)
        users = [
            UserDataUser(
                username=user["username"],
                ssh_keys=user.get("sshKeys", []),
                origin=UserDataUserOrigin.NORMAL,
            )
            for user in user_data["users"]
        ]
        if not exclude_primary and "username" in user_data.keys():
            users.append(
                UserDataUser(
                    username=user_data["username"],
                    ssh_keys=user_data["sshKeys"],
                    origin=UserDataUserOrigin.PRIMARY,
                )
            )
        if not exclude_root:
            users.append(
                UserDataUser(
                    username="root",
                    ssh_keys=user_data["ssh"]["rootKeys"],
                    origin=UserDataUserOrigin.ROOT,
                )
            )
    return users


class UsernameForbidden(Exception):
    """Attempted to create a user with a forbidden username"""

    pass


class UserAlreadyExists(Exception):
    """Attempted to create a user that already exists"""

    pass


class UsernameNotAlphanumeric(Exception):
    """Attempted to create a user with a non-alphanumeric username"""

    pass


class UsernameTooLong(Exception):
    """Attempted to create a user with a username that is too long. Usernames must be less than 32 characters"""

    pass


class PasswordIsEmpty(Exception):
    """Attempted to create a user with an empty password"""

    pass


class InvalidConfiguration(Exception):
    """The userdata is broken"""

    pass


def create_user(username: str, password: str):
    if password == "":
        raise PasswordIsEmpty("Password is empty")

    if is_username_forbidden(username):
        raise UsernameForbidden("Username is forbidden")

    if not re.match(r"^[a-z_][a-z0-9_]+$", username):
        raise UsernameNotAlphanumeric(
            "Username must be alphanumeric and start with a letter"
        )

    if len(username) >= 32:
        raise UsernameTooLong("Username must be less than 32 characters")

    with ReadUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)
        if "username" not in user_data.keys():
            raise InvalidConfiguration(
                "Broken config: Admin name is not defined. Consider recovery or add it manually"
            )
        if username == user_data["username"]:
            raise UserAlreadyExists("User already exists")
        if username in [user["username"] for user in user_data["users"]]:
            raise UserAlreadyExists("User already exists")

    hashed_password = hash_password(password)

    with WriteUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)

        user_data["users"].append(
            {"username": username, "sshKeys": [], "hashedPassword": hashed_password}
        )


class UserNotFound(Exception):
    """Attempted to get a user that does not exist"""

    pass


class UserIsProtected(Exception):
    """Attempted to delete a user that is protected"""

    pass


def delete_user(username: str):
    with WriteUserData() as user_data:
        ensure_ssh_and_users_fields_exist(user_data)
        if username == user_data["username"] or username == "root":
            raise UserIsProtected("Cannot delete main or root user")

        for data_user in user_data["users"]:
            if data_user["username"] == username:
                user_data["users"].remove(data_user)
                break
        else:
            raise UserNotFound("User did not exist")


def update_user(username: str, password: str):
    if password == "":
        raise PasswordIsEmpty("Password is empty")

    hashed_password = hash_password(password)

    with WriteUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == data["username"]:
            data["hashedMasterPassword"] = hashed_password

        # Return 404 if the user does not exist
        else:
            for data_user in data["users"]:
                if data_user["username"] == username:
                    data_user["hashedPassword"] = hashed_password
                    break
            else:
                raise UserNotFound("User does not exist")


def get_user_by_username(username: str) -> Optional[UserDataUser]:
    with ReadUserData() as data:
        ensure_ssh_and_users_fields_exist(data)

        if username == "root":
            return UserDataUser(
                origin=UserDataUserOrigin.ROOT,
                username="root",
                ssh_keys=data["ssh"]["rootKeys"],
            )

        if username == data["username"]:
            return UserDataUser(
                origin=UserDataUserOrigin.PRIMARY,
                username=username,
                ssh_keys=data["sshKeys"],
            )

        for user in data["users"]:
            if user["username"] == username:
                if "sshKeys" not in user:
                    user["sshKeys"] = []

                return UserDataUser(
                    origin=UserDataUserOrigin.NORMAL,
                    username=username,
                    ssh_keys=user["sshKeys"],
                )

    return None

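A small sketch of listing users with their origins; the output depends entirely on the userdata file:

# Skip the root pseudo-user; origins distinguish the primary admin
# from normal users created through the API.
for user in get_users(exclude_root=True):
    print(user.username, user.origin.value, len(user.ssh_keys))
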
@ -0,0 +1,46 @@
#!/usr/bin/env python3
"""SelfPrivacy server management API"""
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from strawberry.fastapi import GraphQLRouter

import uvicorn

from selfprivacy_api.dependencies import get_api_version
from selfprivacy_api.graphql.schema import schema
from selfprivacy_api.migrations import run_migrations


app = FastAPI()

graphql_app = GraphQLRouter(
    schema,
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


app.include_router(graphql_app, prefix="/graphql")


@app.get("/api/version")
async def get_version():
    """Get the version of the server"""
    return {"version": get_api_version()}


@app.on_event("startup")
async def startup():
    run_migrations()


if __name__ == "__main__":
    uvicorn.run(
        "selfprivacy_api.app:app", host="127.0.0.1", port=5050, log_level="info"
    )

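A quick smoke test against a locally running instance; this assumes the uvicorn entry point above is running and that the `requests` package is available:

import requests

resp = requests.get("http://127.0.0.1:5050/api/version", timeout=5)
print(resp.json())  # e.g. {"version": "..."}
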
@ -0,0 +1,741 @@
"""
This module contains the controller class for backups.
"""
from datetime import datetime, timedelta, timezone
import time
import os
from os import statvfs
from typing import Callable, List, Optional

from selfprivacy_api.services import (
    get_service_by_id,
    get_all_services,
)
from selfprivacy_api.services.service import (
    Service,
    ServiceStatus,
    StoppedService,
)

from selfprivacy_api.jobs import Jobs, JobStatus, Job

from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)
from selfprivacy_api.graphql.common_types.backup import (
    RestoreStrategy,
    BackupReason,
    AutobackupQuotas,
)

from selfprivacy_api.models.backup.snapshot import Snapshot

from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers import get_provider
from selfprivacy_api.backup.storage import Storage
from selfprivacy_api.backup.jobs import (
    get_backup_job,
    get_backup_fail,
    add_backup_job,
    get_restore_job,
    add_restore_job,
)

BACKUP_PROVIDER_ENVS = {
    "kind": "BACKUP_KIND",
    "login": "BACKUP_LOGIN",
    "key": "BACKUP_KEY",
    "location": "BACKUP_LOCATION",
}

AUTOBACKUP_JOB_EXPIRATION_SECONDS = 60 * 60  # one hour


class NotDeadError(AssertionError):
    """
    This error is raised when we try to back up a service that is not dead yet.
    """

    def __init__(self, service: Service):
        self.service_name = service.get_id()
        super().__init__()

    def __str__(self):
        return f"""
        Service {self.service_name} should be either stopped or dead from
        an error before we back up.
        Normally, this error is unreachable because we do try to ensure this.
        Apparently, not this time.
        """


class RotationBucket:
    """
    Bucket object used for rotation.
    Has the following mutable fields:
    - the counter, int
    - the lambda function which takes a datetime and an int and returns an int
    - the last, int
    """

    def __init__(self, counter: int, last: int, rotation_lambda):
        self.counter: int = counter
        self.last: int = last
        self.rotation_lambda: Callable[[datetime, int], int] = rotation_lambda

    def __str__(self) -> str:
        return f"Bucket(counter={self.counter}, last={self.last})"

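For intuition, a toy sketch of how one bucket keeps the first snapshot it sees per period; the daily-style key lambda mirrors the ones built in _prune_snaps_with_quotas below, and the timestamps are invented:

from datetime import datetime

# Keep at most 2 "daily" snapshots: a snapshot is kept when its day key
# differs from the previously kept one and the counter is not exhausted.
bucket = RotationBucket(
    2, -1, lambda date, _: date.year * 10000 + date.month * 100 + date.day
)
for stamp in [datetime(2023, 7, 1, 12), datetime(2023, 7, 1, 18), datetime(2023, 7, 2, 9)]:
    val = bucket.rotation_lambda(stamp, 0)
    if val != bucket.last and (bucket.counter > 0 or bucket.counter == -1):
        bucket.last = val
        if bucket.counter > 0:
            bucket.counter -= 1
        print("keep", stamp)  # keeps 1 Jul 12:00 and 2 Jul 09:00
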
class Backups:
    """A stateless controller class for backups"""

    # Providers

    @staticmethod
    def provider() -> AbstractBackupProvider:
        """
        Returns the current backup storage provider.
        """
        return Backups._lookup_provider()

    @staticmethod
    def set_provider(
        kind: BackupProviderEnum,
        login: str,
        key: str,
        location: str,
        repo_id: str = "",
    ) -> None:
        """
        Sets the new configuration of the backup storage provider.

        In case of `BackupProviderEnum.BACKBLAZE`, the `login` is the key ID,
        the `key` is the key itself, the `location` is the bucket name, and
        the `repo_id` is the bucket ID.
        """
        provider: AbstractBackupProvider = Backups._construct_provider(
            kind,
            login,
            key,
            location,
            repo_id,
        )
        Storage.store_provider(provider)

    @staticmethod
    def reset() -> None:
        """
        Deletes all the data about the backup storage provider.
        """
        Storage.reset()

    @staticmethod
    def _lookup_provider() -> AbstractBackupProvider:
        redis_provider = Backups._load_provider_redis()
        if redis_provider is not None:
            return redis_provider

        none_provider = Backups._construct_provider(
            BackupProviderEnum.NONE, login="", key="", location=""
        )
        Storage.store_provider(none_provider)
        return none_provider

    @staticmethod
    def set_provider_from_envs():
        for env in BACKUP_PROVIDER_ENVS.values():
            if env not in os.environ.keys():
                raise ValueError(
                    f"Cannot set backup provider from envs, there is no {env} set"
                )

        kind_str = os.environ[BACKUP_PROVIDER_ENVS["kind"]]
        kind_enum = BackupProviderEnum[kind_str]
        provider = Backups._construct_provider(
            kind=kind_enum,
            login=os.environ[BACKUP_PROVIDER_ENVS["login"]],
            key=os.environ[BACKUP_PROVIDER_ENVS["key"]],
            location=os.environ[BACKUP_PROVIDER_ENVS["location"]],
        )
        Storage.store_provider(provider)

    @staticmethod
    def _construct_provider(
        kind: BackupProviderEnum,
        login: str,
        key: str,
        location: str,
        repo_id: str = "",
    ) -> AbstractBackupProvider:
        provider_class = get_provider(kind)

        return provider_class(
            login=login,
            key=key,
            location=location,
            repo_id=repo_id,
        )

    @staticmethod
    def _load_provider_redis() -> Optional[AbstractBackupProvider]:
        provider_model = Storage.load_provider()
        if provider_model is None:
            return None
        return Backups._construct_provider(
            BackupProviderEnum[provider_model.kind],
            provider_model.login,
            provider_model.key,
            provider_model.location,
            provider_model.repo_id,
        )

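A minimal sketch of configuring storage from the environment, using the BACKUP_* names from the map above; the values are placeholders for a Backblaze-style setup:

import os

os.environ.update(
    {
        "BACKUP_KIND": "BACKBLAZE",
        "BACKUP_LOGIN": "key-id",
        "BACKUP_KEY": "application-key",
        "BACKUP_LOCATION": "bucket-name",
    }
)
Backups.set_provider_from_envs()
Backups.init_repo()  # required once per fresh repository, see below
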
    # Init

    @staticmethod
    def init_repo() -> None:
        """
        Initializes the backup repository. This is required once per repo.
        """
        Backups.provider().backupper.init()
        Storage.mark_as_init()

    @staticmethod
    def erase_repo() -> None:
        """
        Completely empties the remote
        """
        Backups.provider().backupper.erase_repo()
        Storage.mark_as_uninitted()

    @staticmethod
    def is_initted() -> bool:
        """
        Returns whether the backup repository is initialized or not.
        If it is not initialized, we cannot back up and probably should
        call `init_repo` first.
        """
        if Storage.has_init_mark():
            return True

        initted = Backups.provider().backupper.is_initted()
        if initted:
            Storage.mark_as_init()
            return True

        return False

    # Backup

    @staticmethod
    def back_up(
        service: Service, reason: BackupReason = BackupReason.EXPLICIT
    ) -> Snapshot:
        """The top-level function to back up a service.
        If it fails for any reason at all, it should both mark the job as
        errored and re-raise the error"""

        job = get_backup_job(service)
        if job is None:
            job = add_backup_job(service)
        Jobs.update(job, status=JobStatus.RUNNING)

        try:
            if service.can_be_backed_up() is False:
                raise ValueError("cannot back up a non-backuppable service")
            folders = service.get_folders()
            service_name = service.get_id()
            service.pre_backup()
            snapshot = Backups.provider().backupper.start_backup(
                folders,
                service_name,
                reason=reason,
            )

            Backups._on_new_snapshot_created(service_name, snapshot)
            if reason == BackupReason.AUTO:
                Backups._prune_auto_snaps(service)
            service.post_restore()
        except Exception as error:
            Jobs.update(job, status=JobStatus.ERROR, error=str(error))
            raise error

        Jobs.update(job, status=JobStatus.FINISHED)
        if reason in [BackupReason.AUTO, BackupReason.PRE_RESTORE]:
            Jobs.set_expiration(job, AUTOBACKUP_JOB_EXPIRATION_SECONDS)
        return Backups.sync_date_from_cache(snapshot)

    @staticmethod
    def sync_date_from_cache(snapshot: Snapshot) -> Snapshot:
        """
        Our snapshot creation dates differ from those on the server by a tiny amount.
        This is a convenience; maybe it is better to write a special comparison
        function for snapshots
        """
        return Storage.get_cached_snapshot_by_id(snapshot.id)

    @staticmethod
    def _auto_snaps(service):
        return [
            snap
            for snap in Backups.get_snapshots(service)
            if snap.reason == BackupReason.AUTO
        ]

    @staticmethod
    def _prune_snaps_with_quotas(snapshots: List[Snapshot]) -> List[Snapshot]:
        # Function broken out for testability
        # Sorting newest first
        sorted_snaps = sorted(snapshots, key=lambda s: s.created_at, reverse=True)
        quotas: AutobackupQuotas = Backups.autobackup_quotas()

        buckets: list[RotationBucket] = [
            RotationBucket(
                quotas.last,  # type: ignore
                -1,
                lambda _, index: index,
            ),
            RotationBucket(
                quotas.daily,  # type: ignore
                -1,
                lambda date, _: date.year * 10000 + date.month * 100 + date.day,
            ),
            RotationBucket(
                quotas.weekly,  # type: ignore
                -1,
                lambda date, _: date.year * 100 + date.isocalendar()[1],
            ),
            RotationBucket(
                quotas.monthly,  # type: ignore
                -1,
                lambda date, _: date.year * 100 + date.month,
            ),
            RotationBucket(
                quotas.yearly,  # type: ignore
                -1,
                lambda date, _: date.year,
            ),
        ]

        new_snaplist: List[Snapshot] = []
        for i, snap in enumerate(sorted_snaps):
            keep_snap = False
            for bucket in buckets:
                if (bucket.counter > 0) or (bucket.counter == -1):
                    val = bucket.rotation_lambda(snap.created_at, i)
                    if (val != bucket.last) or (i == len(sorted_snaps) - 1):
                        bucket.last = val
                        if bucket.counter > 0:
                            bucket.counter -= 1
                        if not keep_snap:
                            new_snaplist.append(snap)
                        keep_snap = True

        return new_snaplist

    @staticmethod
    def _prune_auto_snaps(service) -> None:
        # Not very testable by itself, so most testing is going on Backups._prune_snaps_with_quotas
        # We can still test total limits and, say, daily limits

        auto_snaps = Backups._auto_snaps(service)
        new_snaplist = Backups._prune_snaps_with_quotas(auto_snaps)

        deletable_snaps = [snap for snap in auto_snaps if snap not in new_snaplist]
        Backups.forget_snapshots(deletable_snaps)

    @staticmethod
    def _standardize_quotas(i: int) -> int:
        if i <= -1:
            i = -1
        return i

    @staticmethod
    def autobackup_quotas() -> AutobackupQuotas:
        """0 means do not keep, -1 means unlimited"""

        return Storage.autobackup_quotas()

    @staticmethod
    def set_autobackup_quotas(quotas: AutobackupQuotas) -> None:
        """0 means do not keep, -1 means unlimited"""

        Storage.set_autobackup_quotas(
            AutobackupQuotas(
                last=Backups._standardize_quotas(quotas.last),  # type: ignore
                daily=Backups._standardize_quotas(quotas.daily),  # type: ignore
                weekly=Backups._standardize_quotas(quotas.weekly),  # type: ignore
                monthly=Backups._standardize_quotas(quotas.monthly),  # type: ignore
                yearly=Backups._standardize_quotas(quotas.yearly),  # type: ignore
            )
        )
        # do not prune all autosnaps right away, this will be done by an async task

    @staticmethod
    def prune_all_autosnaps() -> None:
        for service in get_all_services():
            Backups._prune_auto_snaps(service)

    # Restoring

    @staticmethod
    def _ensure_queued_restore_job(service, snapshot) -> Job:
        job = get_restore_job(service)
        if job is None:
            job = add_restore_job(snapshot)

        Jobs.update(job, status=JobStatus.CREATED)
        return job

    @staticmethod
    def _inplace_restore(
        service: Service,
        snapshot: Snapshot,
        job: Job,
    ) -> None:
        Jobs.update(
            job, status=JobStatus.CREATED, status_text="Waiting for pre-restore backup"
        )
        failsafe_snapshot = Backups.back_up(service, BackupReason.PRE_RESTORE)

        Jobs.update(
            job, status=JobStatus.RUNNING, status_text=f"Restoring from {snapshot.id}"
        )
        try:
            Backups._restore_service_from_snapshot(
                service,
                snapshot.id,
                verify=False,
            )
        except Exception as error:
            Jobs.update(
                job,
                status=JobStatus.ERROR,
                status_text=f"Restore failed with {str(error)}, reverting to {failsafe_snapshot.id}",
            )
            Backups._restore_service_from_snapshot(
                service, failsafe_snapshot.id, verify=False
            )
            Jobs.update(
                job,
                status=JobStatus.ERROR,
                status_text=f"Restore failed with {str(error)}, reverted to {failsafe_snapshot.id}",
            )
            raise error

    @staticmethod
    def restore_snapshot(
        snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
    ) -> None:
        """Restores a snapshot to its original service using the given strategy"""
        service = get_service_by_id(snapshot.service_name)
        if service is None:
            raise ValueError(
                f"snapshot has a nonexistent service: {snapshot.service_name}"
            )
        job = Backups._ensure_queued_restore_job(service, snapshot)

        try:
            Backups._assert_restorable(snapshot)
            Jobs.update(
                job, status=JobStatus.RUNNING, status_text="Stopping the service"
            )
            with StoppedService(service):
                Backups.assert_dead(service)
                if strategy == RestoreStrategy.INPLACE:
                    Backups._inplace_restore(service, snapshot, job)
                else:  # verify_before_download is our default
                    Jobs.update(
                        job,
                        status=JobStatus.RUNNING,
                        status_text=f"Restoring from {snapshot.id}",
                    )
                    Backups._restore_service_from_snapshot(
                        service, snapshot.id, verify=True
                    )

                service.post_restore()
                Jobs.update(
                    job,
                    status=JobStatus.RUNNING,
                    progress=90,
                    status_text="Restarting the service",
                )

        except Exception as error:
            Jobs.update(job, status=JobStatus.ERROR, status_text=str(error))
            raise error

        Jobs.update(job, status=JobStatus.FINISHED)

    @staticmethod
    def _assert_restorable(
        snapshot: Snapshot, strategy=RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE
    ) -> None:
        service = get_service_by_id(snapshot.service_name)
        if service is None:
            raise ValueError(
                f"snapshot has a nonexistent service: {snapshot.service_name}"
            )

        restored_snap_size = Backups.snapshot_restored_size(snapshot.id)

        if strategy == RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE:
            needed_space = restored_snap_size
        elif strategy == RestoreStrategy.INPLACE:
            needed_space = restored_snap_size - service.get_storage_usage()
        else:
            raise NotImplementedError(
                """
                We do not know if there is enough space for restoration because
                a novel restore strategy is being used!
                This is a developer's fault, please open an issue
                """
            )
        available_space = Backups.space_usable_for_service(service)
        if needed_space > available_space:
            raise ValueError(
                f"we only have {available_space} bytes "
                f"but the snapshot needs {needed_space}"
            )

    @staticmethod
    def _restore_service_from_snapshot(
        service: Service,
        snapshot_id: str,
        verify=True,
    ) -> None:
        folders = service.get_folders()

        Backups.provider().backupper.restore_from_backup(
            snapshot_id,
            folders,
            verify=verify,
        )

    # Snapshots

    @staticmethod
    def get_snapshots(service: Service) -> List[Snapshot]:
        """Returns all snapshots for a given service"""
        snapshots = Backups.get_all_snapshots()
        service_id = service.get_id()
        return list(
            filter(
                lambda snap: snap.service_name == service_id,
                snapshots,
            )
        )

    @staticmethod
    def get_all_snapshots() -> List[Snapshot]:
        """Returns all snapshots"""
        # When we refresh our cache:
        # 1. Manually
        # 2. On timer
        # 3. On new snapshot
        # 4. On snapshot deletion

        return Storage.get_cached_snapshots()

    @staticmethod
    def get_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
        """Returns a backup snapshot by its id"""
        snap = Storage.get_cached_snapshot_by_id(snapshot_id)
        if snap is not None:
            return snap

        # Possibly our cache entry got invalidated, let's try one more time
        Backups.force_snapshot_cache_reload()
        snap = Storage.get_cached_snapshot_by_id(snapshot_id)

        return snap

    @staticmethod
    def forget_snapshots(snapshots: List[Snapshot]) -> None:
        """
        Deletes a batch of snapshots from the repo and syncs the cache.
        Optimized
        """
        ids = [snapshot.id for snapshot in snapshots]
        Backups.provider().backupper.forget_snapshots(ids)

        Backups.force_snapshot_cache_reload()

    @staticmethod
    def forget_snapshot(snapshot: Snapshot) -> None:
        """Deletes a snapshot from the repo and from the cache"""
        Backups.forget_snapshots([snapshot])

    @staticmethod
    def forget_all_snapshots():
        """
        Mark all snapshots we have made for deletion and make them inaccessible
        (this is done by the cloud, we only issue a command)
        """
        Backups.forget_snapshots(Backups.get_all_snapshots())

    @staticmethod
    def force_snapshot_cache_reload() -> None:
        """
        Forces a reload of the snapshot cache.

        This may be an expensive operation, so use it wisely.
        The user pays for the API calls.
        """
        upstream_snapshots = Backups.provider().backupper.get_snapshots()
        Storage.invalidate_snapshot_storage()
        for snapshot in upstream_snapshots:
            Storage.cache_snapshot(snapshot)

    @staticmethod
    def snapshot_restored_size(snapshot_id: str) -> int:
        """Returns the size of the snapshot"""
        return Backups.provider().backupper.restored_size(
            snapshot_id,
        )

    @staticmethod
    def _on_new_snapshot_created(service_id: str, snapshot: Snapshot) -> None:
        """What do we do with a snapshot that is just made?"""
        # non-expiring timestamp of the last
        Storage.store_last_timestamp(service_id, snapshot)
        Backups.force_snapshot_cache_reload()

    # Autobackup

    @staticmethod
    def autobackup_period_minutes() -> Optional[int]:
        """None means autobackup is disabled"""
        return Storage.autobackup_period_minutes()

    @staticmethod
    def set_autobackup_period_minutes(minutes: int) -> None:
        """
        0 and negative numbers are equivalent to disable.
        Setting to a positive number may result in a backup very soon
        if some services are not backed up.
        """
        if minutes <= 0:
            Backups.disable_all_autobackup()
            return
        Storage.store_autobackup_period_minutes(minutes)

    @staticmethod
    def disable_all_autobackup() -> None:
        """
        Disables all automatic backing up,
        but does not change per-service settings
        """
        Storage.delete_backup_period()

    @staticmethod
    def is_time_to_backup(time: datetime) -> bool:
        """
        Intended as a time validator for the huey cron scheduler
        of automatic backups
        """

        return Backups.services_to_back_up(time) != []

    @staticmethod
    def services_to_back_up(time: datetime) -> List[Service]:
        """Returns a list of services that should be backed up at a given time"""
        return [
            service
            for service in get_all_services()
            if Backups.is_time_to_backup_service(service, time)
        ]

    @staticmethod
    def get_last_backed_up(service: Service) -> Optional[datetime]:
        """Get a timezone-aware time of the last backup of a service"""
        return Storage.get_last_backup_time(service.get_id())

    @staticmethod
    def get_last_backup_error_time(service: Service) -> Optional[datetime]:
        """Get a timezone-aware time of the last failed backup of a service"""
        job = get_backup_fail(service)
        if job is not None:
            datetime_created = job.created_at
            if datetime_created.tzinfo is None:
                # assume it is in localtime
                offset = timedelta(seconds=time.localtime().tm_gmtoff)
                datetime_created = datetime_created - offset
                return datetime.combine(
                    datetime_created.date(), datetime_created.time(), timezone.utc
                )
            return datetime_created
        return None

    @staticmethod
    def is_time_to_backup_service(service: Service, time: datetime):
        """Returns True if it is time to back up a service"""
        period = Backups.autobackup_period_minutes()
        if period is None:
            return False

        if not service.is_enabled():
            return False
        if not service.can_be_backed_up():
            return False

        last_error = Backups.get_last_backup_error_time(service)

        if last_error is not None:
            if time < last_error + timedelta(seconds=AUTOBACKUP_JOB_EXPIRATION_SECONDS):
                return False

        last_backup = Backups.get_last_backed_up(service)

        # Queue a backup immediately if there are no previous backups
        if last_backup is None:
            return True

        if time > last_backup + timedelta(minutes=period):
            return True

        return False

    # Helpers

    @staticmethod
    def space_usable_for_service(service: Service) -> int:
        """
        Returns the amount of space available on the volume the given
        service is located on.
        """
        folders = service.get_folders()
        if folders == []:
            raise ValueError("unallocated service", service.get_id())

        # We assume all folders of one service live on the same volume
        fs_info = statvfs(folders[0])
        usable_bytes = fs_info.f_frsize * fs_info.f_bavail
        return usable_bytes

    @staticmethod
    def set_localfile_repo(file_path: str):
        """Used by tests to set a local folder as a backup repo"""
        # pylint: disable-next=invalid-name
        ProviderClass = get_provider(BackupProviderEnum.FILE)
        provider = ProviderClass(
            login="",
            key="",
            location=file_path,
            repo_id="",
        )
        Storage.store_provider(provider)

    @staticmethod
    def assert_dead(service: Service):
        """
        Checks if a service is dead and can be safely restored from a snapshot.
        """
        if service.get_status() not in [
            ServiceStatus.INACTIVE,
            ServiceStatus.FAILED,
        ]:
            raise NotDeadError(service)

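To tie the autobackup pieces together, a hedged configuration sketch; it assumes a periodic task elsewhere calls Backups.is_time_to_backup, as the docstring above suggests:

from selfprivacy_api.graphql.common_types.backup import AutobackupQuotas

# Back up at most hourly; keep the 3 newest auto-snapshots plus one per
# day for a week, one per week for a month, etc. (-1 means unlimited).
Backups.set_autobackup_period_minutes(60)
Backups.set_autobackup_quotas(
    AutobackupQuotas(last=3, daily=7, weekly=4, monthly=12, yearly=-1)
)
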
@ -0,0 +1,73 @@
from abc import ABC, abstractmethod
from typing import List

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.graphql.common_types.backup import BackupReason


class AbstractBackupper(ABC):
    """Abstract class for backuppers"""

    # flake8: noqa: B027
    def __init__(self) -> None:
        pass

    @abstractmethod
    def is_initted(self) -> bool:
        """Returns true if the repository is initted"""
        raise NotImplementedError

    @abstractmethod
    def set_creds(self, account: str, key: str, repo: str) -> None:
        """Set the credentials for the backupper"""
        raise NotImplementedError

    @abstractmethod
    def start_backup(
        self,
        folders: List[str],
        service_name: str,
        reason: BackupReason = BackupReason.EXPLICIT,
    ) -> Snapshot:
        """Start a backup of the given folders"""
        raise NotImplementedError

    @abstractmethod
    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        raise NotImplementedError

    @abstractmethod
    def init(self) -> None:
        """Initialize the repository"""
        raise NotImplementedError

    @abstractmethod
    def erase_repo(self) -> None:
        """Completely empties the remote"""
        raise NotImplementedError

    @abstractmethod
    def restore_from_backup(
        self,
        snapshot_id: str,
        folders: List[str],
        verify=True,
    ) -> None:
        """Restore a target folder using a snapshot"""
        raise NotImplementedError

    @abstractmethod
    def restored_size(self, snapshot_id: str) -> int:
        """Get the size of the restored snapshot"""
        raise NotImplementedError

    @abstractmethod
    def forget_snapshot(self, snapshot_id) -> None:
        """Forget a snapshot"""
        raise NotImplementedError

    @abstractmethod
    def forget_snapshots(self, snapshot_ids: List[str]) -> None:
        """Maybe optimized deletion of a batch of snapshots, just cycling if unsupported"""
        raise NotImplementedError

@ -0,0 +1,45 @@
from typing import List

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.graphql.common_types.backup import BackupReason


class NoneBackupper(AbstractBackupper):
    """A backupper that does nothing"""

    def is_initted(self, repo_name: str = "") -> bool:
        return False

    def set_creds(self, account: str, key: str, repo: str):
        pass

    def start_backup(
        self, folders: List[str], tag: str, reason: BackupReason = BackupReason.EXPLICIT
    ):
        raise NotImplementedError

    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        return []

    def init(self):
        raise NotImplementedError

    def erase_repo(self) -> None:
        """Completely empties the remote"""
        # this one is already empty
        pass

    def restore_from_backup(self, snapshot_id: str, folders: List[str], verify=True):
        """Restore a target folder using a snapshot"""
        raise NotImplementedError

    def restored_size(self, snapshot_id: str) -> int:
        raise NotImplementedError

    def forget_snapshot(self, snapshot_id):
        raise NotImplementedError("forget_snapshot")

    def forget_snapshots(self, snapshots):
        raise NotImplementedError("forget_snapshots")

@@ -0,0 +1,554 @@
from __future__ import annotations

import subprocess
import json
import datetime
import tempfile

from typing import List, Optional, TypeVar, Callable
from collections.abc import Iterable
from json.decoder import JSONDecodeError
from os.path import exists, join
from os import mkdir
from shutil import rmtree

from selfprivacy_api.graphql.common_types.backup import BackupReason
from selfprivacy_api.backup.util import output_yielder, sync
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.backup.jobs import get_backup_job
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.jobs import Jobs, JobStatus, Job

from selfprivacy_api.backup.local_secret import LocalBackupSecret

SHORT_ID_LEN = 8

T = TypeVar("T", bound=Callable)


def unlocked_repo(func: T) -> T:
    """Unlock the repo and retry if it appears to be locked"""

    def inner(self: ResticBackupper, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as error:
            if "unable to create lock" in str(error):
                self.unlock()
                return func(self, *args, **kwargs)
            else:
                raise error

    # Above, we manually guarantee that the type returned is compatible.
    return inner  # type: ignore
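
# --- Illustrative sketch (editor's example, not part of the diff) ---
# The decorator retries exactly once after unlocking. `FlakyRepo` below is a
# made-up stand-in that fails with a stale-lock error on its first call:
class FlakyRepo:
    def __init__(self):
        self.calls = 0

    def unlock(self):
        pass  # pretend to clear the stale lock

    @unlocked_repo
    def check(self):
        self.calls += 1
        if self.calls == 1:
            raise ValueError("unable to create lock")  # first attempt hits a stale lock
        return "ok"

# FlakyRepo().check() == "ok": the first failure triggers unlock() and one retry.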
class ResticBackupper(AbstractBackupper):
    def __init__(self, login_flag: str, key_flag: str, storage_type: str) -> None:
        self.login_flag = login_flag
        self.key_flag = key_flag
        self.storage_type = storage_type
        self.account = ""
        self.key = ""
        self.repo = ""
        super().__init__()

    def set_creds(self, account: str, key: str, repo: str) -> None:
        self.account = account
        self.key = key
        self.repo = repo

    def restic_repo(self) -> str:
        # https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#other-services-via-rclone
        # https://forum.rclone.org/t/can-rclone-be-run-solely-with-command-line-options-no-config-no-env-vars/6314/5
        return f"rclone:{self.rclone_repo()}"

    def rclone_repo(self) -> str:
        return f"{self.storage_type}{self.repo}"

    def rclone_args(self):
        return "rclone.args=serve restic --stdio " + " ".join(
            self.backend_rclone_args()
        )

    def backend_rclone_args(self) -> list[str]:
        args = []
        if self.account != "":
            acc_args = [self.login_flag, self.account]
            args.extend(acc_args)
        if self.key != "":
            key_args = [self.key_flag, self.key]
            args.extend(key_args)
        return args

    def _password_command(self):
        return f"echo {LocalBackupSecret.get()}"

    def restic_command(self, *args, tags: Optional[List[str]] = None) -> List[str]:
        """
        Construct a restic command against the currently configured repo.
        Supports [nested] arrays as arguments; they are flattened into the final command.
        """
        if tags is None:
            tags = []

        command = [
            "restic",
            "-o",
            self.rclone_args(),
            "-r",
            self.restic_repo(),
            "--password-command",
            self._password_command(),
        ]
        if tags != []:
            for tag in tags:
                command.extend(
                    [
                        "--tag",
                        tag,
                    ]
                )
        if args:
            command.extend(ResticBackupper.__flatten_list(args))
        return command
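
    # --- Illustrative sketch (editor's example, not part of the diff) ---
    # With Backblaze flags and placeholder credentials, a call like
    # restic_command("snapshots", "--json", tags=["mailserver"]) builds roughly
    # this argument vector:
    #   ["restic",
    #    "-o", "rclone.args=serve restic --stdio --b2-account ACCOUNT_ID --b2-key APP_KEY",
    #    "-r", "rclone::b2:bucket/repo",
    #    "--password-command", "echo <local secret>",
    #    "--tag", "mailserver",
    #    "snapshots", "--json"]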
    def erase_repo(self) -> None:
        """Fully erases the repo on the remote; it can be reinitted again"""
        command = [
            "rclone",
            "purge",
            self.rclone_repo(),
        ]
        backend_args = self.backend_rclone_args()
        if backend_args:
            command.extend(backend_args)

        with subprocess.Popen(command, stdout=subprocess.PIPE, shell=False) as handle:
            output = handle.communicate()[0].decode("utf-8")
            if handle.returncode != 0:
                raise ValueError(
                    "purge exited with errorcode",
                    handle.returncode,
                    ":",
                    output,
                )

    @staticmethod
    def __flatten_list(list_to_flatten):
        """string-aware list flattener"""
        result = []
        for item in list_to_flatten:
            if isinstance(item, Iterable) and not isinstance(item, str):
                result.extend(ResticBackupper.__flatten_list(item))
                continue
            result.append(item)
        return result

    @staticmethod
    def _run_backup_command(
        backup_command: List[str], job: Optional[Job]
    ) -> List[dict]:
        """Run the backup command and handle its output"""
        messages = []
        output = []
        restic_reported_error = False

        for raw_message in output_yielder(backup_command):
            if "ERROR:" in raw_message:
                restic_reported_error = True
            output.append(raw_message)

            if not restic_reported_error:
                message = ResticBackupper.parse_message(raw_message, job)
                messages.append(message)

        if restic_reported_error:
            raise ValueError(
                "Restic returned error(s): ",
                output,
            )

        return messages

    @staticmethod
    def _replace_in_array(array: List[str], target, replacement) -> None:
        if target == "":
            return

        for i, value in enumerate(array):
            if target in value:
                array[i] = array[i].replace(target, replacement)

    def _censor_command(self, command: List[str]) -> List[str]:
        result = command.copy()
        ResticBackupper._replace_in_array(result, self.key, "CENSORED")
        ResticBackupper._replace_in_array(result, LocalBackupSecret.get(), "CENSORED")
        return result

    @staticmethod
    def _get_backup_job(service_name: str) -> Optional[Job]:
        service = get_service_by_id(service_name)
        if service is None:
            raise ValueError("No service with id ", service_name)

        return get_backup_job(service)

    @unlocked_repo
    def start_backup(
        self,
        folders: List[str],
        service_name: str,
        reason: BackupReason = BackupReason.EXPLICIT,
    ) -> Snapshot:
        """
        Start a backup with restic
        """
        assert len(folders) != 0

        job = ResticBackupper._get_backup_job(service_name)

        tags = [service_name, reason.value]
        backup_command = self.restic_command(
            "backup",
            "--json",
            folders,
            tags=tags,
        )

        try:
            messages = ResticBackupper._run_backup_command(backup_command, job)

            id = ResticBackupper._snapshot_id_from_backup_messages(messages)
            return Snapshot(
                created_at=datetime.datetime.now(datetime.timezone.utc),
                id=id,
                service_name=service_name,
                reason=reason,
            )

        except ValueError as error:
            raise ValueError(
                "Could not create a snapshot: ",
                str(error),
                "command: ",
                self._censor_command(backup_command),
            ) from error

    @staticmethod
    def _snapshot_id_from_backup_messages(messages) -> str:
        for message in messages:
            if message["message_type"] == "summary":
                # There is a discrepancy between versions of restic/rclone:
                # some report a short_id in this field and some the full id
                return message["snapshot_id"][0:SHORT_ID_LEN]

        raise ValueError("no summary message in restic json output")

    @staticmethod
    def parse_message(raw_message_line: str, job: Optional[Job] = None) -> dict:
        message = ResticBackupper.parse_json_output(raw_message_line)
        if not isinstance(message, dict):
            raise ValueError("we have too many messages on one line?")
        if message["message_type"] == "status":
            if job is not None:  # only update status if we run under some job
                Jobs.update(
                    job,
                    JobStatus.RUNNING,
                    progress=int(message["percent_done"] * 100),
                )
        return message
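
    # --- Illustrative sketch (editor's example, not part of the diff) ---
    # restic --json emits one JSON object per line; parse_message() turns a
    # status line into a job progress update, and the summary line carries the
    # snapshot id. With invented payloads:
    #   status = ResticBackupper.parse_message('{"message_type": "status", "percent_done": 0.42}')
    #   summary = ResticBackupper.parse_message('{"message_type": "summary", "snapshot_id": "9f3c1a2b4d5e6f70"}')
    #   ResticBackupper._snapshot_id_from_backup_messages([summary]) == "9f3c1a2b"  # truncated to SHORT_ID_LEN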
    def init(self) -> None:
        init_command = self.restic_command(
            "init",
        )
        with subprocess.Popen(
            init_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as process_handle:
            output = process_handle.communicate()[0].decode("utf-8")
            if "created restic repository" not in output:
                raise ValueError("cannot init a repo: " + output)

    @unlocked_repo
    def is_initted(self) -> bool:
        command = self.restic_command(
            "check",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            shell=False,
            stderr=subprocess.STDOUT,
        ) as handle:
            output = handle.communicate()[0].decode("utf-8")
            if handle.returncode != 0:
                if "unable to create lock" in output:
                    raise ValueError("Stale lock detected: ", output)
                return False
            return True

    def unlock(self) -> None:
        """Remove stale locks."""
        command = self.restic_command(
            "unlock",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            shell=False,
            stderr=subprocess.STDOUT,
        ) as handle:
            # communicate() forces the process to complete so that returncode gets defined
            output = handle.communicate()[0].decode("utf-8")
            if handle.returncode != 0:
                raise ValueError("cannot unlock the backup repository: ", output)

    def lock(self) -> None:
        """
        Introduce a stale lock.
        Mainly for testing purposes.
        A double lock is supposed to fail.
        """
        command = self.restic_command(
            "check",
        )

        # Expected `restic check` output while the lock is held:
        # using temporary cache in /run/user/1000/restic-check-cache-817079729
        # repository 9639c714 opened (repository version 2) successfully, password is correct
        # created new cache in /run/user/1000/restic-check-cache-817079729
        # create exclusive lock for repository
        # load indexes
        # check all packs
        # check snapshots, trees and blobs
        # [0:00] 100.00%  1 / 1 snapshots
        # no errors were found

        try:
            for line in output_yielder(command):
                if "indexes" in line:
                    break
                if "unable" in line:
                    raise ValueError(line)
        except Exception as error:
            raise ValueError("could not lock repository") from error

    @unlocked_repo
    def restored_size(self, snapshot_id: str) -> int:
        """
        Size of a snapshot
        """
        command = self.restic_command(
            "stats",
            snapshot_id,
            "--json",
        )

        with subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=False,
        ) as handle:
            output = handle.communicate()[0].decode("utf-8")
            try:
                parsed_output = ResticBackupper.parse_json_output(output)
                return parsed_output["total_size"]
            except ValueError as error:
                raise ValueError("cannot restore a snapshot: " + output) from error

    @unlocked_repo
    def restore_from_backup(
        self,
        snapshot_id,
        folders: List[str],
        verify=True,
    ) -> None:
        """
        Restore from a backup with restic
        """
        if folders is None or folders == []:
            raise ValueError("cannot restore without knowing where to!")

        with tempfile.TemporaryDirectory() as temp_dir:
            if verify:
                self._raw_verified_restore(snapshot_id, target=temp_dir)
                snapshot_root = temp_dir
                for folder in folders:
                    src = join(snapshot_root, folder.strip("/"))
                    if not exists(src):
                        raise ValueError(
                            f"No such path: {src}. We tried to find {folder}"
                        )
                    dst = folder
                    sync(src, dst)

            else:  # attempting inplace restore
                for folder in folders:
                    rmtree(folder)
                    mkdir(folder)
                self._raw_verified_restore(snapshot_id, target="/")
                return

    def _raw_verified_restore(self, snapshot_id, target="/"):
        """barebones restic restore"""
        restore_command = self.restic_command(
            "restore", snapshot_id, "--target", target, "--verify"
        )

        with subprocess.Popen(
            restore_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=False,
        ) as handle:
            # for some reason restore does not support
            # nice reporting of progress via json
            output = handle.communicate()[0].decode("utf-8")
            if "restoring" not in output:
                raise ValueError("cannot restore a snapshot: " + output)

            assert (
                handle.returncode is not None
            )  # None should be impossible after communicate()
            if handle.returncode != 0:
                raise ValueError(
                    "restore exited with errorcode",
                    handle.returncode,
                    ":",
                    output,
                )

    def forget_snapshot(self, snapshot_id: str) -> None:
        self.forget_snapshots([snapshot_id])

    @unlocked_repo
    def forget_snapshots(self, snapshot_ids: List[str]) -> None:
        # restic can forget a batch of snapshots in one command, so no cycling is needed
        forget_command = self.restic_command(
            "forget",
            [snapshot_ids],
            # TODO: prune should be done in a separate process
            "--prune",
        )

        with subprocess.Popen(
            forget_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
        ) as handle:
            # like restore, forget does not support
            # nice reporting of progress via json
            output, err = [
                string.decode(
                    "utf-8",
                )
                for string in handle.communicate()
            ]

            if "no matching ID found" in err:
                raise ValueError(
                    "trying to delete, but no such snapshot(s): ", snapshot_ids
                )

            assert (
                handle.returncode is not None
            )  # None should be impossible after communicate()
            if handle.returncode != 0:
                raise ValueError(
                    "forget exited with errorcode", handle.returncode, ":", output, err
                )

    def _load_snapshots(self) -> object:
        """
        Load the list of snapshots from the repository.
        Raises ValueError if the repo does not exist.
        """
        listing_command = self.restic_command(
            "snapshots",
            "--json",
        )

        with subprocess.Popen(
            listing_command,
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as backup_listing_process_descriptor:
            output = backup_listing_process_descriptor.communicate()[0].decode("utf-8")

        if "Is there a repository at the following location?" in output:
            raise ValueError("No repository! : " + output)
        try:
            return ResticBackupper.parse_json_output(output)
        except ValueError as error:
            raise ValueError("Cannot load snapshots: ", output) from error

    @unlocked_repo
    def get_snapshots(self) -> List[Snapshot]:
        """Get all snapshots from the repo"""
        snapshots = []

        for restic_snapshot in self._load_snapshots():
            # Compatibility with previous snaps:
            if len(restic_snapshot["tags"]) == 1:
                reason = BackupReason.EXPLICIT
            else:
                reason = restic_snapshot["tags"][1]

            snapshot = Snapshot(
                id=restic_snapshot["short_id"],
                created_at=restic_snapshot["time"],
                service_name=restic_snapshot["tags"][0],
                reason=reason,
            )

            snapshots.append(snapshot)
        return snapshots

    @staticmethod
    def parse_json_output(output: str) -> object:
        starting_index = ResticBackupper.json_start(output)

        if starting_index == -1:
            raise ValueError("There is no json in the restic output: " + output)

        truncated_output = output[starting_index:]
        json_messages = truncated_output.splitlines()
        if len(json_messages) == 1:
            try:
                return json.loads(truncated_output)
            except JSONDecodeError as error:
                raise ValueError(
                    "There is no json in the restic output: " + output
                ) from error

        result_array = []
        for message in json_messages:
            result_array.append(json.loads(message))
        return result_array

    @staticmethod
    def json_start(output: str) -> int:
        indices = [
            output.find("["),
            output.find("{"),
        ]
        indices = [x for x in indices if x != -1]

        if indices == []:
            return -1
        return min(indices)

    @staticmethod
    def has_json(output: str) -> bool:
        if ResticBackupper.json_start(output) == -1:
            return False
        return True
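
json_start and parse_json_output tolerate non-JSON prefixes (rclone notices and the like) as well as both single-object and line-delimited output. A quick sketch with invented inputs:

    noisy = 'repository 9639c714 opened successfully\n{"total_size": 1234}'
    assert ResticBackupper.parse_json_output(noisy) == {"total_size": 1234}

    stream = '{"message_type": "status"}\n{"message_type": "summary"}'
    assert len(ResticBackupper.parse_json_output(stream)) == 2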
@@ -0,0 +1,115 @@
from typing import Optional, List

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.jobs import Jobs, Job, JobStatus
from selfprivacy_api.services.service import Service
from selfprivacy_api.services import get_service_by_id


def job_type_prefix(service: Service) -> str:
    return f"services.{service.get_id()}"


def backup_job_type(service: Service) -> str:
    return f"{job_type_prefix(service)}.backup"


def autobackup_job_type() -> str:
    return "backups.autobackup"


def restore_job_type(service: Service) -> str:
    return f"{job_type_prefix(service)}.restore"


def get_jobs_by_service(service: Service) -> List[Job]:
    result = []
    for job in Jobs.get_jobs():
        if job.type_id.startswith(job_type_prefix(service)) and job.status in [
            JobStatus.CREATED,
            JobStatus.RUNNING,
        ]:
            result.append(job)
    return result


def is_something_running_for(service: Service) -> bool:
    running_jobs = [
        job for job in get_jobs_by_service(service) if job.status == JobStatus.RUNNING
    ]
    return len(running_jobs) != 0


def add_autobackup_job(services: List[Service]) -> Job:
    service_names = [s.get_display_name() for s in services]
    pretty_service_list: str = ", ".join(service_names)
    job = Jobs.add(
        type_id=autobackup_job_type(),
        name="Automatic backup",
        description=f"Scheduled backup for services: {pretty_service_list}",
    )
    return job


def add_backup_job(service: Service) -> Job:
    if is_something_running_for(service):
        message = (
            f"Cannot start a backup of {service.get_id()}, another operation is running: "
            + get_jobs_by_service(service)[0].type_id
        )
        raise ValueError(message)
    display_name = service.get_display_name()
    job = Jobs.add(
        type_id=backup_job_type(service),
        name=f"Backup {display_name}",
        description=f"Backing up {display_name}",
    )
    return job


def add_restore_job(snapshot: Snapshot) -> Job:
    service = get_service_by_id(snapshot.service_name)
    if service is None:
        raise ValueError(f"no such service: {snapshot.service_name}")
    if is_something_running_for(service):
        message = (
            f"Cannot start a restore of {service.get_id()}, another operation is running: "
            + get_jobs_by_service(service)[0].type_id
        )
        raise ValueError(message)
    display_name = service.get_display_name()
    job = Jobs.add(
        type_id=restore_job_type(service),
        name=f"Restore {display_name}",
        description=f"Restoring {display_name} from {snapshot.id}",
    )
    return job


def get_job_by_type(type_id: str) -> Optional[Job]:
    for job in Jobs.get_jobs():
        if job.type_id == type_id and job.status in [
            JobStatus.CREATED,
            JobStatus.RUNNING,
        ]:
            return job
    return None


def get_failed_job_by_type(type_id: str) -> Optional[Job]:
    for job in Jobs.get_jobs():
        if job.type_id == type_id and job.status == JobStatus.ERROR:
            return job
    return None


def get_backup_job(service: Service) -> Optional[Job]:
    return get_job_by_type(backup_job_type(service))


def get_backup_fail(service: Service) -> Optional[Job]:
    return get_failed_job_by_type(backup_job_type(service))


def get_restore_job(service: Service) -> Optional[Job]:
    return get_job_by_type(restore_job_type(service))
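
Job type ids are derived from service ids, which is what lets get_jobs_by_service filter by prefix. A sketch of the naming convention (the service id is invented):

    # For a hypothetical service with id "mailserver":
    # job_type_prefix(service)  -> "services.mailserver"
    # backup_job_type(service)  -> "services.mailserver.backup"
    # restore_job_type(service) -> "services.mailserver.restore"
    # add_backup_job(service) refuses to enqueue while any "services.mailserver.*"
    # job is still CREATED or RUNNING.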
@@ -0,0 +1,45 @@
"""Handling of the local secret used for encrypted backups.
Separated out for circular dependency reasons.
"""

from __future__ import annotations
import secrets

from selfprivacy_api.utils.redis_pool import RedisPool


REDIS_KEY = "backup:local_secret"

redis = RedisPool().get_connection()


class LocalBackupSecret:
    @staticmethod
    def get() -> str:
        """A secret string which backblaze/other clouds do not know.
        Serves as the encryption key.
        """
        if not LocalBackupSecret.exists():
            LocalBackupSecret.reset()
        return redis.get(REDIS_KEY)  # type: ignore

    @staticmethod
    def set(secret: str):
        redis.set(REDIS_KEY, secret)

    @staticmethod
    def reset():
        new_secret = LocalBackupSecret._generate()
        LocalBackupSecret.set(new_secret)

    @staticmethod
    def _full_reset():
        redis.delete(REDIS_KEY)

    @staticmethod
    def exists() -> bool:
        return redis.exists(REDIS_KEY) == 1

    @staticmethod
    def _generate() -> str:
        return secrets.token_urlsafe(256)
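
The secret is generated lazily on first use, so the usual call site is just get(). A sketch:

    # First call generates a urlsafe token from 256 random bytes and stores it
    # in redis; subsequent calls return the same value.
    key = LocalBackupSecret.get()
    assert LocalBackupSecret.exists()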
@@ -0,0 +1,31 @@
from typing import Type

from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider

from selfprivacy_api.backup.providers.backblaze import Backblaze
from selfprivacy_api.backup.providers.memory import InMemoryBackup
from selfprivacy_api.backup.providers.local_file import LocalFileBackup
from selfprivacy_api.backup.providers.none import NoBackups

PROVIDER_MAPPING: dict[BackupProviderEnum, Type[AbstractBackupProvider]] = {
    BackupProviderEnum.BACKBLAZE: Backblaze,
    BackupProviderEnum.MEMORY: InMemoryBackup,
    BackupProviderEnum.FILE: LocalFileBackup,
    BackupProviderEnum.NONE: NoBackups,
}


def get_provider(
    provider_type: BackupProviderEnum,
) -> Type[AbstractBackupProvider]:
    if provider_type not in PROVIDER_MAPPING.keys():
        raise LookupError("could not look up provider", provider_type)
    return PROVIDER_MAPPING[provider_type]


def get_kind(provider: AbstractBackupProvider) -> str:
    """Get the kind of the provider in the form of a string"""
    return provider.name.value
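
get_provider returns a class, not an instance; callers instantiate it with credentials. A sketch (credentials are placeholders):

    provider_class = get_provider(BackupProviderEnum.BACKBLAZE)
    provider = provider_class(login="ACCOUNT_ID", key="APP_KEY", location="bucket/repo")
    # get_kind(provider) yields the enum's string value, presumably "BACKBLAZE".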
@@ -0,0 +1,11 @@
from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class Backblaze(AbstractBackupProvider):
    backupper = ResticBackupper("--b2-account", "--b2-key", ":b2:")

    name = BackupProviderEnum.BACKBLAZE
@@ -0,0 +1,11 @@
from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class LocalFileBackup(AbstractBackupProvider):
    backupper = ResticBackupper("", "", ":local:")

    name = BackupProviderEnum.FILE
@@ -0,0 +1,11 @@
from .provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.restic_backupper import ResticBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class InMemoryBackup(AbstractBackupProvider):
    backupper = ResticBackupper("", "", ":memory:")

    name = BackupProviderEnum.MEMORY
@@ -0,0 +1,11 @@
from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.backuppers.none_backupper import NoneBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class NoBackups(AbstractBackupProvider):
    backupper = NoneBackupper()

    name = BackupProviderEnum.NONE
@@ -0,0 +1,25 @@
"""
An abstract class for BackBlaze, S3 etc.
It assumes that while some providers are supported via restic/rclone, others
may require different backends.
"""
from abc import ABC, abstractmethod
from selfprivacy_api.backup.backuppers import AbstractBackupper
from selfprivacy_api.graphql.queries.providers import (
    BackupProvider as BackupProviderEnum,
)


class AbstractBackupProvider(ABC):
    backupper: AbstractBackupper

    name: BackupProviderEnum

    def __init__(self, login="", key="", location="", repo_id=""):
        self.backupper.set_creds(login, key, location)
        self.login = login
        self.key = key
        self.location = location
        # We do not need to do anything with this one
        # Just remember it in case the app forgets
        self.repo_id = repo_id
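
A new provider is declared by pairing a backupper configuration with an enum member. A hypothetical S3 provider might look like this (the flag names and the S3 enum member are invented for illustration; the real rclone/restic flags would need checking):

    # Hypothetical sketch, not part of the diff.
    class S3Backup(AbstractBackupProvider):
        backupper = ResticBackupper("--s3-access-key-id", "--s3-secret-access-key", ":s3:")
        name = BackupProviderEnum.S3  # assumes such an enum member exists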
@@ -0,0 +1,198 @@
"""
Module for storing backup related data in redis.
"""
from typing import List, Optional
from datetime import datetime

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.models.backup.provider import BackupProviderModel
from selfprivacy_api.graphql.common_types.backup import (
    AutobackupQuotas,
    _AutobackupQuotas,
)

from selfprivacy_api.utils.redis_pool import RedisPool
from selfprivacy_api.utils.redis_model_storage import (
    store_model_as_hash,
    hash_as_model,
)

from selfprivacy_api.backup.providers.provider import AbstractBackupProvider
from selfprivacy_api.backup.providers import get_kind

REDIS_SNAPSHOTS_PREFIX = "backups:snapshots:"
REDIS_LAST_BACKUP_PREFIX = "backups:last-backed-up:"
REDIS_INITTED_CACHE = "backups:repo_initted"

REDIS_PROVIDER_KEY = "backups:provider"
REDIS_AUTOBACKUP_PERIOD_KEY = "backups:autobackup_period"

REDIS_AUTOBACKUP_QUOTAS_KEY = "backups:autobackup_quotas_key"

redis = RedisPool().get_connection()


class Storage:
    """Static class for storing backup related data in redis"""

    @staticmethod
    def reset() -> None:
        """Deletes all backup related data from redis"""
        redis.delete(REDIS_PROVIDER_KEY)
        redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)
        redis.delete(REDIS_INITTED_CACHE)
        redis.delete(REDIS_AUTOBACKUP_QUOTAS_KEY)

        prefixes_to_clean = [
            REDIS_SNAPSHOTS_PREFIX,
            REDIS_LAST_BACKUP_PREFIX,
        ]

        for prefix in prefixes_to_clean:
            for key in redis.keys(prefix + "*"):
                redis.delete(key)

    @staticmethod
    def invalidate_snapshot_storage() -> None:
        """Deletes all cached snapshots from redis"""
        for key in redis.keys(REDIS_SNAPSHOTS_PREFIX + "*"):
            redis.delete(key)

    @staticmethod
    def __last_backup_key(service_id: str) -> str:
        return REDIS_LAST_BACKUP_PREFIX + service_id

    @staticmethod
    def __snapshot_key(snapshot: Snapshot) -> str:
        return REDIS_SNAPSHOTS_PREFIX + snapshot.id

    @staticmethod
    def get_last_backup_time(service_id: str) -> Optional[datetime]:
        """Returns the last backup time for a service or None if it was never backed up"""
        key = Storage.__last_backup_key(service_id)
        if not redis.exists(key):
            return None

        snapshot = hash_as_model(redis, key, Snapshot)
        if not snapshot:
            return None
        return snapshot.created_at

    @staticmethod
    def store_last_timestamp(service_id: str, snapshot: Snapshot) -> None:
        """Stores the last backup time for a service"""
        store_model_as_hash(
            redis,
            Storage.__last_backup_key(service_id),
            snapshot,
        )

    @staticmethod
    def cache_snapshot(snapshot: Snapshot) -> None:
        """Stores snapshot metadata in redis for caching purposes"""
        snapshot_key = Storage.__snapshot_key(snapshot)
        store_model_as_hash(redis, snapshot_key, snapshot)

    @staticmethod
    def delete_cached_snapshot(snapshot: Snapshot) -> None:
        """Deletes snapshot metadata from redis"""
        snapshot_key = Storage.__snapshot_key(snapshot)
        redis.delete(snapshot_key)

    @staticmethod
    def get_cached_snapshot_by_id(snapshot_id: str) -> Optional[Snapshot]:
        """Returns a cached snapshot by id or None if it doesn't exist"""
        key = REDIS_SNAPSHOTS_PREFIX + snapshot_id
        if not redis.exists(key):
            return None
        return hash_as_model(redis, key, Snapshot)

    @staticmethod
    def get_cached_snapshots() -> List[Snapshot]:
        """Returns all cached snapshots stored in redis"""
        keys: list[str] = redis.keys(REDIS_SNAPSHOTS_PREFIX + "*")  # type: ignore
        result: list[Snapshot] = []

        for key in keys:
            snapshot = hash_as_model(redis, key, Snapshot)
            if snapshot:
                result.append(snapshot)
        return result

    @staticmethod
    def autobackup_period_minutes() -> Optional[int]:
        """None means autobackup is disabled"""
        if not redis.exists(REDIS_AUTOBACKUP_PERIOD_KEY):
            return None
        return int(redis.get(REDIS_AUTOBACKUP_PERIOD_KEY))  # type: ignore

    @staticmethod
    def store_autobackup_period_minutes(minutes: int) -> None:
        """Set the new autobackup period in minutes"""
        redis.set(REDIS_AUTOBACKUP_PERIOD_KEY, minutes)

    @staticmethod
    def delete_backup_period() -> None:
        """Set the autobackup period to None, effectively disabling autobackup"""
        redis.delete(REDIS_AUTOBACKUP_PERIOD_KEY)

    @staticmethod
    def store_provider(provider: AbstractBackupProvider) -> None:
        """Stores backup provider auth data in redis"""
        model = BackupProviderModel(
            kind=get_kind(provider),
            login=provider.login,
            key=provider.key,
            location=provider.location,
            repo_id=provider.repo_id,
        )
        store_model_as_hash(redis, REDIS_PROVIDER_KEY, model)
        if Storage.load_provider() != model:
            raise IOError("could not store the provider model: ", model.dict)

    @staticmethod
    def load_provider() -> Optional[BackupProviderModel]:
        """Loads backup storage provider auth data from redis"""
        provider_model = hash_as_model(
            redis,
            REDIS_PROVIDER_KEY,
            BackupProviderModel,
        )
        return provider_model

    @staticmethod
    def has_init_mark() -> bool:
        """Returns True if the repository was initialized"""
        if redis.exists(REDIS_INITTED_CACHE):
            return True
        return False

    @staticmethod
    def mark_as_init():
        """Marks the repository as initialized"""
        redis.set(REDIS_INITTED_CACHE, 1)

    @staticmethod
    def mark_as_uninitted():
        """Clears the initialization mark of the repository"""
        redis.delete(REDIS_INITTED_CACHE)

    @staticmethod
    def set_autobackup_quotas(quotas: AutobackupQuotas) -> None:
        store_model_as_hash(redis, REDIS_AUTOBACKUP_QUOTAS_KEY, quotas.to_pydantic())

    @staticmethod
    def autobackup_quotas() -> AutobackupQuotas:
        quotas_model = hash_as_model(
            redis, REDIS_AUTOBACKUP_QUOTAS_KEY, _AutobackupQuotas
        )
        if quotas_model is None:
            unlimited_quotas = AutobackupQuotas(
                last=-1,
                daily=-1,
                weekly=-1,
                monthly=-1,
                yearly=-1,
            )
            return unlimited_quotas
        return AutobackupQuotas.from_pydantic(quotas_model)  # pylint: disable=no-member
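
Snapshots are cached as redis hashes keyed by id under the backups:snapshots: prefix, so a round-trip looks like this (a sketch; the field values are invented and BackupReason would need importing here):

    snap = Snapshot(
        id="9f3c1a2b",
        service_name="mailserver",
        created_at=datetime.now(),
        reason=BackupReason.EXPLICIT,
    )
    Storage.cache_snapshot(snap)
    cached = Storage.get_cached_snapshot_by_id("9f3c1a2b")
    assert cached is not None and cached.service_name == "mailserver"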
@@ -0,0 +1,117 @@
"""
The tasks module contains the worker tasks that are used to back up and restore.
"""
from datetime import datetime, timezone

from selfprivacy_api.graphql.common_types.backup import (
    RestoreStrategy,
    BackupReason,
)

from selfprivacy_api.models.backup.snapshot import Snapshot
from selfprivacy_api.utils.huey import huey
from huey import crontab

from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.backup import Backups
from selfprivacy_api.backup.jobs import add_autobackup_job
from selfprivacy_api.jobs import Jobs, JobStatus, Job


SNAPSHOT_CACHE_TTL_HOURS = 6


def validate_datetime(dt: datetime) -> bool:
    """
    Validates that it is time to back up.
    Also ensures that a timezone-aware time is used.
    """
    if dt.tzinfo is None:
        return Backups.is_time_to_backup(dt.replace(tzinfo=timezone.utc))
    return Backups.is_time_to_backup(dt)


# huey tasks need to return something
@huey.task()
def start_backup(service_id: str, reason: BackupReason = BackupReason.EXPLICIT) -> bool:
    """
    The worker task that starts the backup process.
    """
    service = get_service_by_id(service_id)
    if service is None:
        raise ValueError(f"No such service: {service_id}")
    Backups.back_up(service, reason)
    return True


@huey.task()
def prune_autobackup_snapshots(job: Job) -> bool:
    """
    Remove all autobackup snapshots that do not fit into the quotas set
    """
    Jobs.update(job, JobStatus.RUNNING)
    try:
        Backups.prune_all_autosnaps()
    except Exception as e:
        Jobs.update(job, JobStatus.ERROR, error=type(e).__name__ + ":" + str(e))
        return False

    Jobs.update(job, JobStatus.FINISHED)
    return True


@huey.task()
def restore_snapshot(
    snapshot: Snapshot,
    strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
) -> bool:
    """
    The worker task that starts the restore process.
    """
    Backups.restore_snapshot(snapshot, strategy)
    return True


def do_autobackup() -> None:
    """
    Body of the autobackup task, broken out to test it.
    For some reason, we cannot launch periodic huey tasks
    inside tests.
    """
    time = datetime.utcnow().replace(tzinfo=timezone.utc)
    services_to_back_up = Backups.services_to_back_up(time)
    if not services_to_back_up:
        return
    job = add_autobackup_job(services_to_back_up)

    progress_per_service = 100 // len(services_to_back_up)
    progress = 0
    Jobs.update(job, JobStatus.RUNNING, progress=progress)

    for service in services_to_back_up:
        try:
            Backups.back_up(service, BackupReason.AUTO)
        except Exception as error:
            Jobs.update(
                job,
                status=JobStatus.ERROR,
                error=type(error).__name__ + ": " + str(error),
            )
            return
        progress = progress + progress_per_service
        Jobs.update(job, JobStatus.RUNNING, progress=progress)

    Jobs.update(job, JobStatus.FINISHED)


@huey.periodic_task(validate_datetime=validate_datetime)
def automatic_backup() -> None:
    """
    The worker periodic task that starts the automatic backup process.
    """
    do_autobackup()


@huey.periodic_task(crontab(hour="*/" + str(SNAPSHOT_CACHE_TTL_HOURS)))
def reload_snapshot_cache():
    Backups.force_snapshot_cache_reload()
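
Huey tasks are enqueued simply by calling them; the call returns a result handle rather than running inline. A sketch of kicking off a backup from API code (the service id is invented):

    # Enqueue; the huey worker picks it up asynchronously.
    result = start_backup("mailserver", BackupReason.EXPLICIT)
    # result(blocking=True) would wait for the worker and return True.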
@@ -0,0 +1,35 @@
import subprocess
from os.path import exists
from typing import Generator


def output_yielder(command) -> Generator[str, None, None]:
    """Note: If you break during iteration, it kills the process"""
    with subprocess.Popen(
        command,
        shell=False,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    ) as handle:
        if handle is None or handle.stdout is None:
            raise ValueError("could not run command: ", command)

        try:
            for line in iter(handle.stdout.readline, ""):
                if "NOTICE:" not in line:
                    yield line
        except GeneratorExit:
            handle.kill()


def sync(src_path: str, dest_path: str):
    """a wrapper around rclone sync"""

    if not exists(src_path):
        raise ValueError("source dir for rclone sync must exist")

    rclone_command = ["rclone", "sync", "-P", src_path, dest_path]
    for raw_message in output_yielder(rclone_command):
        if "ERROR" in raw_message:
            raise ValueError(raw_message)
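
output_yielder streams a subprocess line by line, which is what lets the restic backupper report progress while a backup is still running. A trivial sketch:

    # Prints lines as they arrive instead of waiting for the process to exit.
    for line in output_yielder(["echo", "hello"]):
        print(line, end="")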
@@ -0,0 +1,30 @@
from fastapi import Depends, HTTPException, status
from fastapi.security import APIKeyHeader
from pydantic import BaseModel

from selfprivacy_api.actions.api_tokens import is_token_valid


class TokenHeader(BaseModel):
    token: str


async def get_token_header(
    token: str = Depends(APIKeyHeader(name="Authorization", auto_error=False))
) -> TokenHeader:
    if token is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED, detail="Token not provided"
        )
    else:
        token = token.replace("Bearer ", "")
        if not is_token_valid(token):
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token"
            )
    return TokenHeader(token=token)


def get_api_version() -> str:
    """Get API version"""
    return "3.1.0"
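
get_token_header plugs into FastAPI's dependency injection; a protected route simply declares it. A sketch (the route path is invented for illustration):

    from fastapi import APIRouter

    router = APIRouter()

    @router.get("/protected")  # hypothetical route
    async def protected(token: TokenHeader = Depends(get_token_header)):
        # Reaching this point means the Authorization header held a valid token.
        return {"ok": True}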
@@ -0,0 +1,21 @@
"""GraphQL API for SelfPrivacy."""
# pylint: disable=too-few-public-methods
import typing
from strawberry.permission import BasePermission
from strawberry.types import Info

from selfprivacy_api.actions.api_tokens import is_token_valid


class IsAuthenticated(BasePermission):
    """Is authenticated permission"""

    message = "You must be authenticated to access this resource."

    def has_permission(self, source: typing.Any, info: Info, **kwargs) -> bool:
        token = info.context["request"].headers.get("Authorization")
        if token is None:
            token = info.context["request"].query_params.get("token")
        if token is None:
            return False
        return is_token_valid(token.replace("Bearer ", ""))
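
Strawberry checks permission classes before resolving a field, so attaching IsAuthenticated is declarative. A sketch (the field itself is invented):

    import strawberry

    @strawberry.type
    class Query:
        @strawberry.field(permission_classes=[IsAuthenticated])  # hypothetical field
        def whoami(self) -> str:
            return "authenticated caller"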
@@ -0,0 +1,36 @@
"""Backup"""
# pylint: disable=too-few-public-methods
from enum import Enum
import strawberry
from pydantic import BaseModel


@strawberry.enum
class RestoreStrategy(Enum):
    INPLACE = "INPLACE"
    DOWNLOAD_VERIFY_OVERWRITE = "DOWNLOAD_VERIFY_OVERWRITE"


@strawberry.enum
class BackupReason(Enum):
    EXPLICIT = "EXPLICIT"
    AUTO = "AUTO"
    PRE_RESTORE = "PRE_RESTORE"


class _AutobackupQuotas(BaseModel):
    last: int
    daily: int
    weekly: int
    monthly: int
    yearly: int


@strawberry.experimental.pydantic.type(model=_AutobackupQuotas, all_fields=True)
class AutobackupQuotas:
    pass


@strawberry.experimental.pydantic.input(model=_AutobackupQuotas, all_fields=True)
class AutobackupQuotasInput:
    pass
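
The strawberry pydantic bridge gives the GraphQL types to_pydantic/from_pydantic converters, which is how Storage round-trips quotas through redis. A sketch with invented values:

    quotas = AutobackupQuotas(last=-1, daily=3, weekly=4, monthly=12, yearly=-1)
    model = quotas.to_pydantic()  # an _AutobackupQuotas pydantic model
    assert AutobackupQuotas.from_pydantic(model).daily == 3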
@@ -0,0 +1,15 @@
import typing
import strawberry


# TODO: use https://strawberry.rocks/docs/integrations/pydantic when it is stable
@strawberry.type
class DnsRecord:
    """DNS record"""

    record_type: str
    name: str
    content: str
    ttl: int
    priority: typing.Optional[int]
    display_name: str
@@ -0,0 +1,51 @@
"""Jobs status"""
# pylint: disable=too-few-public-methods
import datetime
import typing
import strawberry

from selfprivacy_api.jobs import Job, Jobs


@strawberry.type
class ApiJob:
    """Job type for GraphQL."""

    uid: str
    type_id: str
    name: str
    description: str
    status: str
    status_text: typing.Optional[str]
    progress: typing.Optional[int]
    created_at: datetime.datetime
    updated_at: datetime.datetime
    finished_at: typing.Optional[datetime.datetime]
    error: typing.Optional[str]
    result: typing.Optional[str]


def job_to_api_job(job: Job) -> ApiJob:
    """Convert a Job from the jobs controller to a GraphQL ApiJob."""
    return ApiJob(
        uid=str(job.uid),
        type_id=job.type_id,
        name=job.name,
        description=job.description,
        status=job.status.name,
        status_text=job.status_text,
        progress=job.progress,
        created_at=job.created_at,
        updated_at=job.updated_at,
        finished_at=job.finished_at,
        error=job.error,
        result=job.result,
    )


def get_api_job_by_id(job_id: str) -> typing.Optional[ApiJob]:
    """Get a job for GraphQL by its ID."""
    job = Jobs.get_job(job_id)
    if job is None:
        return None
    return job_to_api_job(job)
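
Resolvers use get_api_job_by_id so that a missing job maps to a GraphQL null instead of an error. A sketch (the id is invented):

    api_job = get_api_job_by_id("some-uid")  # hypothetical id
    if api_job is None:
        ...  # surfaced to the client as null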
@@ -0,0 +1,182 @@
from enum import Enum
from typing import Optional, List
import datetime
import strawberry

from selfprivacy_api.graphql.common_types.backup import BackupReason
from selfprivacy_api.graphql.common_types.dns import DnsRecord

from selfprivacy_api.services import get_service_by_id, get_services_by_location
from selfprivacy_api.services import Service as ServiceInterface
from selfprivacy_api.services import ServiceDnsRecord

from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.utils.network import get_ip4, get_ip6


def get_usages(root: "StorageVolume") -> list["StorageUsageInterface"]:
    """Get usages of a volume"""
    return [
        ServiceStorageUsage(
            service=service_to_graphql_service(service),
            title=service.get_display_name(),
            used_space=str(service.get_storage_usage()),
            volume=get_volume_by_id(service.get_drive()),
        )
        for service in get_services_by_location(root.name)
    ]


@strawberry.type
class StorageVolume:
    """Stats and basic info about a volume or a system disk."""

    total_space: str
    free_space: str
    used_space: str
    root: bool
    name: str
    model: Optional[str]
    serial: Optional[str]
    type: str

    @strawberry.field
    def usages(self) -> list["StorageUsageInterface"]:
        """Get usages of a volume"""
        return get_usages(self)


@strawberry.interface
class StorageUsageInterface:
    used_space: str
    volume: Optional[StorageVolume]
    title: str


@strawberry.type
class ServiceStorageUsage(StorageUsageInterface):
    """Storage usage for a service"""

    service: Optional["Service"]


@strawberry.enum
class ServiceStatusEnum(Enum):
    ACTIVE = "ACTIVE"
    RELOADING = "RELOADING"
    INACTIVE = "INACTIVE"
    FAILED = "FAILED"
    ACTIVATING = "ACTIVATING"
    DEACTIVATING = "DEACTIVATING"
    OFF = "OFF"


def get_storage_usage(root: "Service") -> ServiceStorageUsage:
    """Get storage usage for a service"""
    service = get_service_by_id(root.id)
    if service is None:
        return ServiceStorageUsage(
            service=service,
            title="Not found",
            used_space="0",
            volume=get_volume_by_id("sda1"),
        )
    return ServiceStorageUsage(
        service=service_to_graphql_service(service),
        title=service.get_display_name(),
        used_space=str(service.get_storage_usage()),
        volume=get_volume_by_id(service.get_drive()),
    )


# TODO: This won't be needed when deriving DnsRecord via strawberry pydantic integration
# https://strawberry.rocks/docs/integrations/pydantic
# Remove when the link above says it got stable.
def service_dns_to_graphql(record: ServiceDnsRecord) -> DnsRecord:
    return DnsRecord(
        record_type=record.type,
        name=record.name,
        content=record.content,
        ttl=record.ttl,
        priority=record.priority,
        display_name=record.display_name,
    )


@strawberry.type
class Service:
    id: str
    display_name: str
    description: str
    svg_icon: str
    is_movable: bool
    is_required: bool
    is_enabled: bool
    can_be_backed_up: bool
    backup_description: str
    status: ServiceStatusEnum
    url: Optional[str]

    @strawberry.field
    def dns_records(self) -> Optional[List[DnsRecord]]:
        service = get_service_by_id(self.id)
        if service is None:
            raise LookupError(f"no service {self.id}. Should be unreachable")

        raw_records = service.get_dns_records(get_ip4(), get_ip6())
        dns_records = [service_dns_to_graphql(record) for record in raw_records]
        return dns_records

    @strawberry.field
    def storage_usage(self) -> ServiceStorageUsage:
        """Get storage usage for a service"""
        return get_storage_usage(self)

    # TODO: fill this
    @strawberry.field
    def backup_snapshots(self) -> Optional[List["SnapshotInfo"]]:
        return None


@strawberry.type
class SnapshotInfo:
    id: str
    service: Service
    created_at: datetime.datetime
    reason: BackupReason


def service_to_graphql_service(service: ServiceInterface) -> Service:
    """Convert service to graphql service"""
    return Service(
        id=service.get_id(),
        display_name=service.get_display_name(),
        description=service.get_description(),
        svg_icon=service.get_svg_icon(),
        is_movable=service.is_movable(),
        is_required=service.is_required(),
        is_enabled=service.is_enabled(),
        can_be_backed_up=service.can_be_backed_up(),
        backup_description=service.get_backup_description(),
        status=ServiceStatusEnum(service.get_status().value),
        url=service.get_url(),
    )


def get_volume_by_id(volume_id: str) -> Optional[StorageVolume]:
    """Get volume by id"""
    volume = BlockDevices().get_block_device(volume_id)
    if volume is None:
        return None
    return StorageVolume(
        total_space=str(volume.fssize)
        if volume.fssize is not None
        else str(volume.size),
        free_space=str(volume.fsavail),
        used_space=str(volume.fsused),
        root=volume.name == "sda1",
        name=volume.name,
        model=volume.model,
        serial=volume.serial,
        type=volume.type,
    )
@@ -0,0 +1,55 @@
import typing
from enum import Enum
import strawberry
import selfprivacy_api.actions.users as users_actions

from selfprivacy_api.graphql.mutations.mutation_interface import (
    MutationReturnInterface,
)


@strawberry.enum
class UserType(Enum):
    NORMAL = "NORMAL"
    PRIMARY = "PRIMARY"
    ROOT = "ROOT"


@strawberry.type
class User:
    user_type: UserType
    username: str
    # userHomeFolderspace: UserHomeFolderUsage
    ssh_keys: typing.List[str] = strawberry.field(default_factory=list)


@strawberry.type
class UserMutationReturn(MutationReturnInterface):
    """Return type for user mutation"""

    user: typing.Optional[User] = None


def get_user_by_username(username: str) -> typing.Optional[User]:
    user = users_actions.get_user_by_username(username)
    if user is None:
        return None

    return User(
        user_type=UserType(user.origin.value),
        username=user.username,
        ssh_keys=user.ssh_keys,
    )


def get_users() -> typing.List[User]:
    """Get users"""
    users = users_actions.get_users(exclude_root=True)
    return [
        User(
            user_type=UserType(user.origin.value),
            username=user.username,
            ssh_keys=user.ssh_keys,
        )
        for user in users
    ]
@@ -0,0 +1,219 @@
"""API access mutations"""
# pylint: disable=too-few-public-methods
import datetime
import typing
import strawberry
from strawberry.types import Info
from selfprivacy_api.actions.api_tokens import (
    CannotDeleteCallerException,
    InvalidExpirationDate,
    InvalidUsesLeft,
    NotFoundException,
    delete_api_token,
    get_new_api_recovery_key,
    use_mnemonic_recovery_token,
    refresh_api_token,
    delete_new_device_auth_token,
    get_new_device_auth_token,
    use_new_device_auth_token,
)
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericMutationReturn,
    MutationReturnInterface,
)


@strawberry.type
class ApiKeyMutationReturn(MutationReturnInterface):
    key: typing.Optional[str]


@strawberry.type
class DeviceApiTokenMutationReturn(MutationReturnInterface):
    token: typing.Optional[str]


@strawberry.input
class RecoveryKeyLimitsInput:
    """Recovery key limits input"""

    expiration_date: typing.Optional[datetime.datetime] = None
    uses: typing.Optional[int] = None


@strawberry.input
class UseRecoveryKeyInput:
    """Use recovery key input"""

    key: str
    deviceName: str


@strawberry.input
class UseNewDeviceKeyInput:
    """Use new device key input"""

    key: str
    deviceName: str


@strawberry.type
class ApiMutations:
    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def get_new_recovery_api_key(
        self, limits: typing.Optional[RecoveryKeyLimitsInput] = None
    ) -> ApiKeyMutationReturn:
        """Generate recovery key"""
        if limits is None:
            limits = RecoveryKeyLimitsInput()
        try:
            key = get_new_api_recovery_key(limits.expiration_date, limits.uses)
        except InvalidExpirationDate:
            return ApiKeyMutationReturn(
                success=False,
                message="Expiration date must be in the future",
                code=400,
                key=None,
            )
        except InvalidUsesLeft:
            return ApiKeyMutationReturn(
                success=False,
                message="Uses must be greater than 0",
                code=400,
                key=None,
            )
        return ApiKeyMutationReturn(
            success=True,
            message="Recovery key generated",
            code=200,
            key=key,
        )

    @strawberry.mutation()
    def use_recovery_api_key(
        self, input: UseRecoveryKeyInput
    ) -> DeviceApiTokenMutationReturn:
        """Use recovery key"""
        token = use_mnemonic_recovery_token(input.key, input.deviceName)
        if token is not None:
            return DeviceApiTokenMutationReturn(
                success=True,
                message="Recovery key used",
                code=200,
                token=token,
            )
        else:
            return DeviceApiTokenMutationReturn(
                success=False,
                message="Recovery key not found",
                code=404,
                token=None,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def refresh_device_api_token(self, info: Info) -> DeviceApiTokenMutationReturn:
        """Refresh device api token"""
        token_string = (
            info.context["request"]
            .headers.get("Authorization", "")
            .replace("Bearer ", "")
        )
        # the header lookup defaults to "", so the result is never None;
        # check for emptiness instead
        if not token_string:
            return DeviceApiTokenMutationReturn(
                success=False,
                message="Token not found",
                code=404,
                token=None,
            )

        try:
            new_token = refresh_api_token(token_string)
            return DeviceApiTokenMutationReturn(
                success=True,
                message="Token refreshed",
                code=200,
                token=new_token,
            )
        except NotFoundException:
            return DeviceApiTokenMutationReturn(
                success=False,
                message="Token not found",
                code=404,
                token=None,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def delete_device_api_token(self, device: str, info: Info) -> GenericMutationReturn:
        """Delete device api token"""
        self_token = (
            info.context["request"]
            .headers.get("Authorization", "")
            .replace("Bearer ", "")
        )
        try:
            delete_api_token(self_token, device)
        except NotFoundException:
            return GenericMutationReturn(
                success=False,
                message="Token not found",
                code=404,
            )
        except CannotDeleteCallerException:
            return GenericMutationReturn(
                success=False,
                message="Cannot delete caller token",
                code=400,
            )
        except Exception as e:
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )
        return GenericMutationReturn(
            success=True,
            message="Token deleted",
            code=200,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def get_new_device_api_key(self) -> ApiKeyMutationReturn:
        """Generate device api key"""
        key = get_new_device_auth_token()
        return ApiKeyMutationReturn(
            success=True,
            message="Device api key generated",
            code=200,
            key=key,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def invalidate_new_device_api_key(self) -> GenericMutationReturn:
        """Invalidate new device api key"""
        delete_new_device_auth_token()
        return GenericMutationReturn(
            success=True,
            message="New device key deleted",
            code=200,
        )

    @strawberry.mutation()
    def authorize_with_new_device_api_key(
        self, input: UseNewDeviceKeyInput
    ) -> DeviceApiTokenMutationReturn:
        """Authorize with new device api key"""
        token = use_new_device_auth_token(input.key, input.deviceName)
        if token is None:
            return DeviceApiTokenMutationReturn(
                success=False,
                message="Token not found",
                code=404,
                token=None,
            )
        return DeviceApiTokenMutationReturn(
            success=True,
            message="Token used",
            code=200,
            token=token,
        )
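The Authorization-header strip above ("Bearer " prefix removal) is written out twice in this file, and once more in the devices query further down. A minimal sketch of a shared helper that would deduplicate it; the helper name is hypothetical, not part of this changeset:

from strawberry.types import Info


def token_from_info(info: Info) -> str:
    # Extract the caller's raw API token; the header lookup defaults to "",
    # so a missing Authorization header yields an empty string, never None.
    return (
        info.context["request"]
        .headers.get("Authorization", "")
        .replace("Bearer ", "")
    )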
@@ -0,0 +1,241 @@
import typing
import strawberry

from selfprivacy_api.jobs import Jobs

from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericMutationReturn,
    GenericJobMutationReturn,
    MutationReturnInterface,
)
from selfprivacy_api.graphql.queries.backup import BackupConfiguration, Backup
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.graphql.common_types.backup import (
    AutobackupQuotasInput,
    RestoreStrategy,
)

from selfprivacy_api.backup import Backups
from selfprivacy_api.services import get_service_by_id
from selfprivacy_api.backup.tasks import (
    start_backup,
    restore_snapshot,
    prune_autobackup_snapshots,
)
from selfprivacy_api.backup.jobs import add_backup_job, add_restore_job


@strawberry.input
class InitializeRepositoryInput:
    """Initialize repository input"""

    provider: BackupProvider
    # These fields may become optional for other providers;
    # Backblaze takes a bucket id and name
    location_id: str
    location_name: str
    # Key ID and key for Backblaze
    login: str
    password: str


@strawberry.type
class GenericBackupConfigReturn(MutationReturnInterface):
    """Generic backup config return"""

    configuration: typing.Optional[BackupConfiguration]


@strawberry.type
class BackupMutations:
    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def initialize_repository(
        self, repository: InitializeRepositoryInput
    ) -> GenericBackupConfigReturn:
        """Initialize a new repository"""
        Backups.set_provider(
            kind=repository.provider,
            login=repository.login,
            key=repository.password,
            location=repository.location_name,
            repo_id=repository.location_id,
        )
        Backups.init_repo()
        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_repository(self) -> GenericBackupConfigReturn:
        """Remove repository"""
        Backups.reset()
        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def set_autobackup_period(
        self, period: typing.Optional[int] = None
    ) -> GenericBackupConfigReturn:
        """Set autobackup period in minutes. None disables autobackup."""
        if period is not None:
            Backups.set_autobackup_period_minutes(period)
        else:
            Backups.set_autobackup_period_minutes(0)

        return GenericBackupConfigReturn(
            success=True,
            message="",
            code=200,
            configuration=Backup().configuration(),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def set_autobackup_quotas(
        self, quotas: AutobackupQuotasInput
    ) -> GenericBackupConfigReturn:
        """
        Set autobackup quotas.
        Values <= 0 for any timeframe mean no limit for that timeframe.
        To disable autobackup, use the autobackup period setting, not this mutation.
        """

        job = Jobs.add(
            name="Trimming autobackup snapshots",
            type_id="backups.autobackup_trimming",
            description="Pruning the excessive snapshots after the new autobackup quotas are set",
        )

        try:
            Backups.set_autobackup_quotas(quotas)
            # this task is async and can fail with only a job to report the error
            prune_autobackup_snapshots(job)
            return GenericBackupConfigReturn(
                success=True,
                message="",
                code=200,
                configuration=Backup().configuration(),
            )

        except Exception as e:
            return GenericBackupConfigReturn(
                success=False,
                message=type(e).__name__ + ": " + str(e),
                code=400,
                configuration=Backup().configuration(),
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def start_backup(self, service_id: str) -> GenericJobMutationReturn:
        """Start backup"""

        service = get_service_by_id(service_id)
        if service is None:
            return GenericJobMutationReturn(
                success=False,
                # 404, matching how restore_backup reports a missing service
                code=404,
                message=f"nonexistent service: {service_id}",
                job=None,
            )

        job = add_backup_job(service)
        start_backup(service_id)

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Backup job queued",
            job=job_to_api_job(job),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def restore_backup(
        self,
        snapshot_id: str,
        strategy: RestoreStrategy = RestoreStrategy.DOWNLOAD_VERIFY_OVERWRITE,
    ) -> GenericJobMutationReturn:
        """Restore backup"""
        snap = Backups.get_snapshot_by_id(snapshot_id)
        if snap is None:
            return GenericJobMutationReturn(
                success=False,
                code=404,
                message=f"No such snapshot: {snapshot_id}",
                job=None,
            )

        service = get_service_by_id(snap.service_name)
        if service is None:
            return GenericJobMutationReturn(
                success=False,
                code=404,
                message=f"nonexistent service: {snap.service_name}",
                job=None,
            )

        try:
            job = add_restore_job(snap)
        except ValueError as error:
            return GenericJobMutationReturn(
                success=False,
                code=400,
                message=str(error),
                job=None,
            )

        restore_snapshot(snap, strategy)

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="restore job created",
            job=job_to_api_job(job),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def forget_snapshot(self, snapshot_id: str) -> GenericMutationReturn:
        """Forget a snapshot.
        Makes it inaccessible from the server.
        After some time, the (encrypted) data will no longer be recoverable
        from the backup server either, but not immediately."""

        snap = Backups.get_snapshot_by_id(snapshot_id)
        if snap is None:
            return GenericMutationReturn(
                success=False,
                code=404,
                message=f"snapshot {snapshot_id} not found",
            )

        try:
            Backups.forget_snapshot(snap)
            return GenericMutationReturn(
                success=True,
                code=200,
                message="",
            )
        except Exception as error:
            return GenericMutationReturn(
                success=False,
                code=400,
                message=str(error),
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def force_snapshots_reload(self) -> GenericMutationReturn:
        """Force snapshots reload"""
        Backups.force_snapshot_cache_reload()
        return GenericMutationReturn(
            success=True,
            code=200,
            message="",
        )
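A minimal usage sketch of the period setting above, assuming the same Backups facade: the period is expressed in minutes, and None (mapped to 0 by the mutation) disables autobackups entirely.

from selfprivacy_api.backup import Backups

Backups.set_autobackup_period_minutes(60)  # autobackup every hour
Backups.set_autobackup_period_minutes(0)   # disable autobackups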
@@ -0,0 +1,216 @@
"""Deprecated mutations

A mistake was made: mutations were not grouped, and were instead placed
at the root of the mutations schema. In this file, we import all the
mutations from their groups and provide them at the root for backwards
compatibility.
"""

import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.user import UserMutationReturn
from selfprivacy_api.graphql.mutations.api_mutations import (
    ApiKeyMutationReturn,
    ApiMutations,
    DeviceApiTokenMutationReturn,
)
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericJobMutationReturn,
    GenericMutationReturn,
)
from selfprivacy_api.graphql.mutations.services_mutations import (
    ServiceJobMutationReturn,
    ServiceMutationReturn,
    ServicesMutations,
)
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
from selfprivacy_api.graphql.mutations.system_mutations import (
    AutoUpgradeSettingsMutationReturn,
    SystemMutations,
    TimezoneMutationReturn,
)
from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations


def deprecated_mutation(func, group, auth=True):
    return strawberry.mutation(
        resolver=func,
        permission_classes=[IsAuthenticated] if auth else [],
        deprecation_reason=f"Use `{group}.{func.__name__}` instead",
    )


@strawberry.type
class DeprecatedApiMutations:
    get_new_recovery_api_key: ApiKeyMutationReturn = deprecated_mutation(
        ApiMutations.get_new_recovery_api_key,
        "api",
    )

    use_recovery_api_key: DeviceApiTokenMutationReturn = deprecated_mutation(
        ApiMutations.use_recovery_api_key,
        "api",
        auth=False,
    )

    refresh_device_api_token: DeviceApiTokenMutationReturn = deprecated_mutation(
        ApiMutations.refresh_device_api_token,
        "api",
    )

    delete_device_api_token: GenericMutationReturn = deprecated_mutation(
        ApiMutations.delete_device_api_token,
        "api",
    )

    get_new_device_api_key: ApiKeyMutationReturn = deprecated_mutation(
        ApiMutations.get_new_device_api_key,
        "api",
    )

    invalidate_new_device_api_key: GenericMutationReturn = deprecated_mutation(
        ApiMutations.invalidate_new_device_api_key,
        "api",
    )

    authorize_with_new_device_api_key: DeviceApiTokenMutationReturn = (
        deprecated_mutation(
            ApiMutations.authorize_with_new_device_api_key,
            "api",
            auth=False,
        )
    )


@strawberry.type
class DeprecatedSystemMutations:
    change_timezone: TimezoneMutationReturn = deprecated_mutation(
        SystemMutations.change_timezone,
        "system",
    )

    change_auto_upgrade_settings: AutoUpgradeSettingsMutationReturn = (
        deprecated_mutation(
            SystemMutations.change_auto_upgrade_settings,
            "system",
        )
    )

    run_system_rebuild: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_rebuild,
        "system",
    )

    run_system_rollback: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_rollback,
        "system",
    )

    run_system_upgrade: GenericMutationReturn = deprecated_mutation(
        SystemMutations.run_system_upgrade,
        "system",
    )

    reboot_system: GenericMutationReturn = deprecated_mutation(
        SystemMutations.reboot_system,
        "system",
    )

    pull_repository_changes: GenericMutationReturn = deprecated_mutation(
        SystemMutations.pull_repository_changes,
        "system",
    )


@strawberry.type
class DeprecatedUsersMutations:
    create_user: UserMutationReturn = deprecated_mutation(
        UsersMutations.create_user,
        "users",
    )

    delete_user: GenericMutationReturn = deprecated_mutation(
        UsersMutations.delete_user,
        "users",
    )

    update_user: UserMutationReturn = deprecated_mutation(
        UsersMutations.update_user,
        "users",
    )

    add_ssh_key: UserMutationReturn = deprecated_mutation(
        UsersMutations.add_ssh_key,
        "users",
    )

    remove_ssh_key: UserMutationReturn = deprecated_mutation(
        UsersMutations.remove_ssh_key,
        "users",
    )


@strawberry.type
class DeprecatedStorageMutations:
    resize_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.resize_volume,
        "storage",
    )

    mount_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.mount_volume,
        "storage",
    )

    unmount_volume: GenericMutationReturn = deprecated_mutation(
        StorageMutations.unmount_volume,
        "storage",
    )

    migrate_to_binds: GenericJobMutationReturn = deprecated_mutation(
        StorageMutations.migrate_to_binds,
        "storage",
    )


@strawberry.type
class DeprecatedServicesMutations:
    enable_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.enable_service,
        "services",
    )

    disable_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.disable_service,
        "services",
    )

    stop_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.stop_service,
        "services",
    )

    start_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.start_service,
        "services",
    )

    restart_service: ServiceMutationReturn = deprecated_mutation(
        ServicesMutations.restart_service,
        "services",
    )

    move_service: ServiceJobMutationReturn = deprecated_mutation(
        ServicesMutations.move_service,
        "services",
    )


@strawberry.type
class DeprecatedJobMutations:
    remove_job: GenericMutationReturn = deprecated_mutation(
        JobMutations.remove_job,
        "jobs",
    )
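The deprecation_reason set by deprecated_mutation is visible to clients through standard GraphQL introspection; deprecated fields are hidden by default, so includeDeprecated must be requested explicitly. A hedged sketch of such a query, stored as a Python string:

INTROSPECT_DEPRECATED_MUTATIONS = """
{
    __schema {
        mutationType {
            fields(includeDeprecated: true) {
                name
                isDeprecated
                deprecationReason
            }
        }
    }
}
"""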
@@ -0,0 +1,28 @@
"""Manipulate jobs"""
# pylint: disable=too-few-public-methods
import strawberry

from selfprivacy_api.graphql.mutations.mutation_interface import GenericMutationReturn
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.jobs import Jobs


@strawberry.type
class JobMutations:
    """Mutations related to jobs"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_job(self, job_id: str) -> GenericMutationReturn:
        """Remove a job from the queue"""
        result = Jobs.remove_by_uid(job_id)
        if result:
            return GenericMutationReturn(
                success=True,
                code=200,
                message="Job removed",
            )
        return GenericMutationReturn(
            success=False,
            code=404,
            message="Job not found",
        )
@@ -0,0 +1,21 @@
import strawberry
import typing

from selfprivacy_api.graphql.common_types.jobs import ApiJob


@strawberry.interface
class MutationReturnInterface:
    success: bool
    message: str
    code: int


@strawberry.type
class GenericMutationReturn(MutationReturnInterface):
    pass


@strawberry.type
class GenericJobMutationReturn(MutationReturnInterface):
    job: typing.Optional[ApiJob] = None
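Because MutationReturnInterface is a strawberry interface, every concrete return type advertises it in the generated SDL, which is what lets clients select success/message/code uniformly across mutations. A self-contained sketch; the toy Query and Mutation types here are assumptions for the demo, not part of this changeset:

import strawberry

from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericMutationReturn,
)


@strawberry.type
class Query:
    @strawberry.field
    def ping(self) -> str:
        return "pong"


@strawberry.type
class Mutation:
    @strawberry.mutation
    def noop(self) -> GenericMutationReturn:
        return GenericMutationReturn(success=True, message="", code=200)


# str(schema) renders the SDL; GenericMutationReturn appears as
# "type GenericMutationReturn implements MutationReturnInterface".
schema = strawberry.Schema(query=Query, mutation=Mutation)
print(schema)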
@@ -0,0 +1,217 @@
"""Services mutations"""
# pylint: disable=too-few-public-methods
import typing
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.jobs import JobStatus

from traceback import format_tb as format_traceback

from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericJobMutationReturn,
    GenericMutationReturn,
)
from selfprivacy_api.graphql.common_types.service import (
    Service,
    service_to_graphql_service,
)

from selfprivacy_api.actions.services import (
    move_service,
    ServiceNotFoundError,
    VolumeNotFoundError,
)

from selfprivacy_api.services import get_service_by_id


@strawberry.type
class ServiceMutationReturn(GenericMutationReturn):
    """Service mutation return type."""

    service: typing.Optional[Service] = None


@strawberry.input
class MoveServiceInput:
    """Move service input type."""

    service_id: str
    location: str


@strawberry.type
class ServiceJobMutationReturn(GenericJobMutationReturn):
    """Service job mutation return type."""

    service: typing.Optional[Service] = None


@strawberry.type
class ServicesMutations:
    """Services mutations."""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def enable_service(self, service_id: str) -> ServiceMutationReturn:
        """Enable service."""
        try:
            service = get_service_by_id(service_id)
            if service is None:
                return ServiceMutationReturn(
                    success=False,
                    message="Service not found.",
                    code=404,
                )
            service.enable()
        except Exception as e:
            return ServiceMutationReturn(
                success=False,
                message=pretty_error(e),
                code=400,
            )

        return ServiceMutationReturn(
            success=True,
            message="Service enabled.",
            code=200,
            service=service_to_graphql_service(service),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def disable_service(self, service_id: str) -> ServiceMutationReturn:
        """Disable service."""
        try:
            service = get_service_by_id(service_id)
            if service is None:
                return ServiceMutationReturn(
                    success=False,
                    message="Service not found.",
                    code=404,
                )
            service.disable()
        except Exception as e:
            return ServiceMutationReturn(
                success=False,
                message=pretty_error(e),
                code=400,
            )
        return ServiceMutationReturn(
            success=True,
            message="Service disabled.",
            code=200,
            service=service_to_graphql_service(service),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def stop_service(self, service_id: str) -> ServiceMutationReturn:
        """Stop service."""
        service = get_service_by_id(service_id)
        if service is None:
            return ServiceMutationReturn(
                success=False,
                message="Service not found.",
                code=404,
            )
        service.stop()
        return ServiceMutationReturn(
            success=True,
            message="Service stopped.",
            code=200,
            service=service_to_graphql_service(service),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def start_service(self, service_id: str) -> ServiceMutationReturn:
        """Start service."""
        service = get_service_by_id(service_id)
        if service is None:
            return ServiceMutationReturn(
                success=False,
                message="Service not found.",
                code=404,
            )
        service.start()
        return ServiceMutationReturn(
            success=True,
            message="Service started.",
            code=200,
            service=service_to_graphql_service(service),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def restart_service(self, service_id: str) -> ServiceMutationReturn:
        """Restart service."""
        service = get_service_by_id(service_id)
        if service is None:
            return ServiceMutationReturn(
                success=False,
                message="Service not found.",
                code=404,
            )
        service.restart()
        return ServiceMutationReturn(
            success=True,
            message="Service restarted.",
            code=200,
            service=service_to_graphql_service(service),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def move_service(self, input: MoveServiceInput) -> ServiceJobMutationReturn:
        """Move service."""
        # We need a service instance for a reply later
        service = get_service_by_id(input.service_id)
        if service is None:
            return ServiceJobMutationReturn(
                success=False,
                message=f"Service does not exist: {input.service_id}",
                code=404,
            )

        try:
            job = move_service(input.service_id, input.location)

        except (ServiceNotFoundError, VolumeNotFoundError) as e:
            return ServiceJobMutationReturn(
                success=False,
                message=pretty_error(e),
                code=404,
            )
        except Exception as e:
            return ServiceJobMutationReturn(
                success=False,
                message=pretty_error(e),
                code=400,
                service=service_to_graphql_service(service),
            )

        if job.status in [JobStatus.CREATED, JobStatus.RUNNING]:
            return ServiceJobMutationReturn(
                success=True,
                message="Started moving the service.",
                code=200,
                service=service_to_graphql_service(service),
                job=job_to_api_job(job),
            )
        elif job.status == JobStatus.FINISHED:
            return ServiceJobMutationReturn(
                success=True,
                message="Service moved.",
                code=200,
                service=service_to_graphql_service(service),
                job=job_to_api_job(job),
            )
        else:
            return ServiceJobMutationReturn(
                success=False,
                message=f"While moving the service and performing the step '{job.status_text}', an error occurred: {job.error}",
                code=400,
                service=service_to_graphql_service(service),
                job=job_to_api_job(job),
            )


def pretty_error(e: Exception) -> str:
    # join the formatted traceback frames with newlines so each lands on its own line
    traceback = "\n".join(format_traceback(e.__traceback__))
    return type(e).__name__ + ": " + str(e) + ": " + traceback
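A quick illustration of pretty_error's output shape, assuming the function is importable from this module; the KeyError here is purely illustrative:

from selfprivacy_api.graphql.mutations.services_mutations import pretty_error

try:
    {}["missing"]
except Exception as exc:
    # prints e.g. "KeyError: 'missing': <one formatted traceback frame per line>"
    print(pretty_error(exc))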
@@ -0,0 +1,102 @@
"""Storage devices mutations"""
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.utils.block_devices import BlockDevices
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericJobMutationReturn,
    GenericMutationReturn,
)
from selfprivacy_api.jobs.migrate_to_binds import (
    BindMigrationConfig,
    is_bind_migrated,
    start_bind_migration,
)


@strawberry.input
class MigrateToBindsInput:
    """Migrate to binds input"""

    email_block_device: str
    bitwarden_block_device: str
    gitea_block_device: str
    nextcloud_block_device: str
    pleroma_block_device: str


@strawberry.type
class StorageMutations:
    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def resize_volume(self, name: str) -> GenericMutationReturn:
        """Resize volume"""
        volume = BlockDevices().get_block_device(name)
        if volume is None:
            return GenericMutationReturn(
                success=False, code=404, message="Volume not found"
            )
        volume.resize()
        return GenericMutationReturn(
            success=True, code=200, message="Volume resize started"
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def mount_volume(self, name: str) -> GenericMutationReturn:
        """Mount volume"""
        volume = BlockDevices().get_block_device(name)
        if volume is None:
            return GenericMutationReturn(
                success=False, code=404, message="Volume not found"
            )
        is_success = volume.mount()
        if is_success:
            return GenericMutationReturn(
                success=True,
                code=200,
                message="Volume mounted, rebuild the system to apply changes",
            )
        return GenericMutationReturn(
            success=False, code=409, message="Volume not mounted (already mounted?)"
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def unmount_volume(self, name: str) -> GenericMutationReturn:
        """Unmount volume"""
        volume = BlockDevices().get_block_device(name)
        if volume is None:
            return GenericMutationReturn(
                success=False, code=404, message="Volume not found"
            )
        is_success = volume.unmount()
        if is_success:
            return GenericMutationReturn(
                success=True,
                code=200,
                message="Volume unmounted, rebuild the system to apply changes",
            )
        return GenericMutationReturn(
            success=False, code=409, message="Volume not unmounted (already unmounted?)"
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def migrate_to_binds(self, input: MigrateToBindsInput) -> GenericJobMutationReturn:
        """Migrate to binds"""
        if is_bind_migrated():
            return GenericJobMutationReturn(
                success=False, code=409, message="Already migrated to binds"
            )
        job = start_bind_migration(
            BindMigrationConfig(
                email_block_device=input.email_block_device,
                bitwarden_block_device=input.bitwarden_block_device,
                gitea_block_device=input.gitea_block_device,
                nextcloud_block_device=input.nextcloud_block_device,
                pleroma_block_device=input.pleroma_block_device,
            )
        )
        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Migration to binds started, rebuild the system to apply changes",
            job=job_to_api_job(job),
        )
@@ -0,0 +1,211 @@
"""System management mutations"""
# pylint: disable=too-few-public-methods
import typing
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.common_types.jobs import job_to_api_job
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericJobMutationReturn,
    GenericMutationReturn,
    MutationReturnInterface,
)

import selfprivacy_api.actions.system as system_actions
from selfprivacy_api.jobs.nix_collect_garbage import start_nix_collect_garbage
import selfprivacy_api.actions.ssh as ssh_actions


@strawberry.type
class TimezoneMutationReturn(MutationReturnInterface):
    """Return type of the timezone mutation, contains timezone"""

    timezone: typing.Optional[str]


@strawberry.type
class AutoUpgradeSettingsMutationReturn(MutationReturnInterface):
    """Return type for auto-upgrade settings"""

    enableAutoUpgrade: bool
    allowReboot: bool


@strawberry.type
class SSHSettingsMutationReturn(MutationReturnInterface):
    """Return type for after changing SSH settings"""

    enable: bool
    password_authentication: bool


@strawberry.input
class SSHSettingsInput:
    """Input type for SSH settings"""

    enable: bool
    password_authentication: bool


@strawberry.input
class AutoUpgradeSettingsInput:
    """Input type for auto upgrade settings"""

    enableAutoUpgrade: typing.Optional[bool] = None
    allowReboot: typing.Optional[bool] = None


@strawberry.type
class SystemMutations:
    """Mutations related to system settings"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def change_timezone(self, timezone: str) -> TimezoneMutationReturn:
        """Change the timezone of the server. Timezone is a tzdatabase name."""
        try:
            system_actions.change_timezone(timezone)
        except system_actions.InvalidTimezone as e:
            return TimezoneMutationReturn(
                success=False,
                message=str(e),
                code=400,
                timezone=None,
            )
        return TimezoneMutationReturn(
            success=True,
            message="Timezone changed",
            code=200,
            timezone=timezone,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def change_auto_upgrade_settings(
        self, settings: AutoUpgradeSettingsInput
    ) -> AutoUpgradeSettingsMutationReturn:
        """Change auto upgrade settings of the server."""
        system_actions.set_auto_upgrade_settings(
            settings.enableAutoUpgrade, settings.allowReboot
        )

        new_settings = system_actions.get_auto_upgrade_settings()

        return AutoUpgradeSettingsMutationReturn(
            success=True,
            message="Auto-upgrade settings changed",
            code=200,
            enableAutoUpgrade=new_settings.enable,
            allowReboot=new_settings.allowReboot,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def change_ssh_settings(
        self, settings: SSHSettingsInput
    ) -> SSHSettingsMutationReturn:
        """Change ssh settings of the server."""
        ssh_actions.set_ssh_settings(
            enable=settings.enable,
            password_authentication=settings.password_authentication,
        )

        new_settings = ssh_actions.get_ssh_settings()

        return SSHSettingsMutationReturn(
            success=True,
            message="SSH settings changed",
            code=200,
            enable=new_settings.enable,
            password_authentication=new_settings.passwordAuthentication,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def run_system_rebuild(self) -> GenericJobMutationReturn:
        try:
            job = system_actions.rebuild_system()
            return GenericJobMutationReturn(
                success=True,
                message="Starting system rebuild",
                code=200,
                job=job_to_api_job(job),
            )
        except system_actions.ShellException as e:
            return GenericJobMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def run_system_rollback(self) -> GenericMutationReturn:
        try:
            # the action must run inside the try block for ShellException
            # to actually be caught
            system_actions.rollback_system()
            return GenericMutationReturn(
                success=True,
                message="Starting system rollback",
                code=200,
            )
        except system_actions.ShellException as e:
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def run_system_upgrade(self) -> GenericJobMutationReturn:
        try:
            job = system_actions.upgrade_system()
            return GenericJobMutationReturn(
                success=True,
                message="Starting system upgrade",
                code=200,
                job=job_to_api_job(job),
            )
        except system_actions.ShellException as e:
            return GenericJobMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def reboot_system(self) -> GenericMutationReturn:
        try:
            # likewise, run the reboot inside the try block
            system_actions.reboot_system()
            return GenericMutationReturn(
                success=True,
                message="System reboot has started",
                code=200,
            )
        except system_actions.ShellException as e:
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def pull_repository_changes(self) -> GenericMutationReturn:
        result = system_actions.pull_repository_changes()
        if result.status == 0:
            return GenericMutationReturn(
                success=True,
                message="Repository changes pulled",
                code=200,
            )
        return GenericMutationReturn(
            success=False,
            message=f"Failed to pull repository changes:\n{result.data}",
            code=500,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def nix_collect_garbage(self) -> GenericJobMutationReturn:
        job = start_nix_collect_garbage()

        return GenericJobMutationReturn(
            success=True,
            code=200,
            message="Garbage collector started...",
            job=job_to_api_job(job),
        )
@@ -0,0 +1,209 @@
#!/usr/bin/env python3
"""Users management module"""
# pylint: disable=too-few-public-methods
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.actions.users import UserNotFound
from selfprivacy_api.graphql.common_types.user import (
    UserMutationReturn,
    get_user_by_username,
)
from selfprivacy_api.actions.ssh import (
    InvalidPublicKey,
    KeyAlreadyExists,
    KeyNotFound,
    create_ssh_key,
    remove_ssh_key,
)
from selfprivacy_api.graphql.mutations.mutation_interface import (
    GenericMutationReturn,
)
import selfprivacy_api.actions.users as users_actions


@strawberry.input
class UserMutationInput:
    """Input type for user mutation"""

    username: str
    password: str


@strawberry.input
class SshMutationInput:
    """Input type for ssh mutation"""

    username: str
    ssh_key: str


@strawberry.type
class UsersMutations:
    """Mutations change user settings"""

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def create_user(self, user: UserMutationInput) -> UserMutationReturn:
        try:
            users_actions.create_user(user.username, user.password)
        except users_actions.PasswordIsEmpty as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=400,
            )
        except users_actions.UsernameForbidden as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=409,
            )
        except users_actions.UsernameNotAlphanumeric as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=400,
            )
        except users_actions.UsernameTooLong as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=400,
            )
        except users_actions.InvalidConfiguration as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=400,
            )
        except users_actions.UserAlreadyExists as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=409,
                user=get_user_by_username(user.username),
            )

        return UserMutationReturn(
            success=True,
            message="User created",
            code=201,
            user=get_user_by_username(user.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def delete_user(self, username: str) -> GenericMutationReturn:
        try:
            users_actions.delete_user(username)
        except users_actions.UserNotFound as e:
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=404,
            )
        except users_actions.UserIsProtected as e:
            return GenericMutationReturn(
                success=False,
                message=str(e),
                code=400,
            )

        return GenericMutationReturn(
            success=True,
            message="User deleted",
            code=200,
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def update_user(self, user: UserMutationInput) -> UserMutationReturn:
        """Update user mutation"""
        try:
            users_actions.update_user(user.username, user.password)
        except users_actions.PasswordIsEmpty as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=400,
            )
        except users_actions.UserNotFound as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=404,
            )

        return UserMutationReturn(
            success=True,
            message="User updated",
            code=200,
            user=get_user_by_username(user.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def add_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Add a new ssh key"""

        try:
            create_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyAlreadyExists:
            return UserMutationReturn(
                success=False,
                message="Key already exists",
                code=409,
            )
        except InvalidPublicKey:
            return UserMutationReturn(
                success=False,
                message="Invalid key type. Only ssh-ed25519, ssh-rsa and ecdsa are supported",
                code=400,
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False,
                message="User not found",
                code=404,
            )
        except Exception as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

        return UserMutationReturn(
            success=True,
            message="New SSH key successfully written",
            code=201,
            user=get_user_by_username(ssh_input.username),
        )

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def remove_ssh_key(self, ssh_input: SshMutationInput) -> UserMutationReturn:
        """Remove ssh key from user"""

        try:
            remove_ssh_key(ssh_input.username, ssh_input.ssh_key)
        except KeyNotFound:
            return UserMutationReturn(
                success=False,
                message="Key not found",
                code=404,
            )
        except UserNotFound:
            return UserMutationReturn(
                success=False,
                message="User not found",
                code=404,
            )
        except Exception as e:
            return UserMutationReturn(
                success=False,
                message=str(e),
                code=500,
            )

        return UserMutationReturn(
            success=True,
            message="SSH key successfully removed",
            code=200,
            user=get_user_by_username(ssh_input.username),
        )
@@ -0,0 +1,83 @@
"""API access status"""
# pylint: disable=too-few-public-methods
import datetime
import typing
import strawberry
from strawberry.types import Info
from selfprivacy_api.actions.api_tokens import (
    get_api_tokens_with_caller_flag,
    get_api_recovery_token_status,
)
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.dependencies import get_api_version as get_api_version_dependency


def get_api_version() -> str:
    """Get API version"""
    return get_api_version_dependency()


@strawberry.type
class ApiDevice:
    """A single device with the SelfPrivacy app installed"""

    name: str
    creation_date: datetime.datetime
    is_caller: bool


@strawberry.type
class ApiRecoveryKeyStatus:
    """Recovery key status"""

    exists: bool
    valid: bool
    creation_date: typing.Optional[datetime.datetime]
    expiration_date: typing.Optional[datetime.datetime]
    uses_left: typing.Optional[int]


def get_recovery_key_status() -> ApiRecoveryKeyStatus:
    """Get recovery key status; times are timezone-aware"""
    status = get_api_recovery_token_status()
    if status is None or not status.exists:
        return ApiRecoveryKeyStatus(
            exists=False,
            valid=False,
            creation_date=None,
            expiration_date=None,
            uses_left=None,
        )
    return ApiRecoveryKeyStatus(
        exists=True,
        valid=status.valid,
        creation_date=status.date,
        expiration_date=status.expiration,
        uses_left=status.uses_left,
    )


@strawberry.type
class Api:
    """API access status"""

    version: str = strawberry.field(resolver=get_api_version)

    @strawberry.field(permission_classes=[IsAuthenticated])
    def devices(self, info: Info) -> typing.List[ApiDevice]:
        return [
            ApiDevice(
                name=device.name,
                creation_date=device.date,
                is_caller=device.is_caller,
            )
            for device in get_api_tokens_with_caller_flag(
                info.context["request"]
                .headers.get("Authorization", "")
                .replace("Bearer ", "")
            )
        ]

    recovery_key: ApiRecoveryKeyStatus = strawberry.field(
        resolver=get_recovery_key_status, permission_classes=[IsAuthenticated]
    )
@@ -0,0 +1,83 @@
"""Backup"""
# pylint: disable=too-few-public-methods
import typing
import strawberry


from selfprivacy_api.backup import Backups
from selfprivacy_api.backup.local_secret import LocalBackupSecret
from selfprivacy_api.graphql.queries.providers import BackupProvider
from selfprivacy_api.graphql.common_types.service import (
    Service,
    ServiceStatusEnum,
    SnapshotInfo,
    service_to_graphql_service,
)
from selfprivacy_api.graphql.common_types.backup import AutobackupQuotas
from selfprivacy_api.services import get_service_by_id


@strawberry.type
class BackupConfiguration:
    provider: BackupProvider
    # When the server is lost, the app should still have the key to decrypt
    # backups on a new server
    encryption_key: str
    # False when the repo is not initialized and not ready to be used
    is_initialized: bool
    # If None, autobackups are disabled
    autobackup_period: typing.Optional[int]
    # None is equal to all quotas being unlimited (-1). Optional for compatibility reasons.
    autobackup_quotas: AutobackupQuotas
    # Bucket name for Backblaze, path for some other providers
    location_name: typing.Optional[str]
    location_id: typing.Optional[str]


@strawberry.type
class Backup:
    @strawberry.field
    def configuration(self) -> BackupConfiguration:
        return BackupConfiguration(
            provider=Backups.provider().name,
            encryption_key=LocalBackupSecret.get(),
            is_initialized=Backups.is_initted(),
            autobackup_period=Backups.autobackup_period_minutes(),
            location_name=Backups.provider().location,
            location_id=Backups.provider().repo_id,
            autobackup_quotas=Backups.autobackup_quotas(),
        )

    @strawberry.field
    def all_snapshots(self) -> typing.List[SnapshotInfo]:
        if not Backups.is_initted():
            return []
        result = []
        snapshots = Backups.get_all_snapshots()
        for snap in snapshots:
            service = get_service_by_id(snap.service_name)
            if service is None:
                # the snapshot's service no longer exists; report a placeholder
                service = Service(
                    id=snap.service_name,
                    display_name=f"{snap.service_name} (Orphaned)",
                    description="",
                    svg_icon="",
                    is_movable=False,
                    is_required=False,
                    is_enabled=False,
                    status=ServiceStatusEnum.OFF,
                    url=None,
                    dns_records=None,
                    can_be_backed_up=False,
                    backup_description="",
                )
            else:
                service = service_to_graphql_service(service)
            graphql_snap = SnapshotInfo(
                id=snap.id,
                service=service,
                created_at=snap.created_at,
                reason=snap.reason,
            )
            result.append(graphql_snap)
        return result
@@ -0,0 +1,30 @@
"""Common types and enums used by different types of queries."""
from enum import Enum
import datetime
import typing
import strawberry


@strawberry.enum
class Severity(Enum):
    """
    Severity of an alert.
    """

    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
    CRITICAL = "CRITICAL"
    SUCCESS = "SUCCESS"


@strawberry.type
class Alert:
    """
    Alert type.
    """

    severity: Severity
    title: str
    message: str
    timestamp: typing.Optional[datetime.datetime]
@@ -0,0 +1,24 @@
"""Jobs status"""
# pylint: disable=too-few-public-methods
import typing
import strawberry
from selfprivacy_api.graphql.common_types.jobs import (
    ApiJob,
    get_api_job_by_id,
    job_to_api_job,
)

from selfprivacy_api.jobs import Jobs


@strawberry.type
class Job:
    @strawberry.field
    def get_jobs(self) -> typing.List[ApiJob]:
        return [job_to_api_job(job) for job in Jobs.get_jobs()]

    @strawberry.field
    def get_job(self, job_id: str) -> typing.Optional[ApiJob]:
        return get_api_job_by_id(job_id)
@@ -0,0 +1,25 @@
"""Enums representing different service providers."""
from enum import Enum
import strawberry


@strawberry.enum
class DnsProvider(Enum):
    CLOUDFLARE = "CLOUDFLARE"
    DIGITALOCEAN = "DIGITALOCEAN"
    DESEC = "DESEC"


@strawberry.enum
class ServerProvider(Enum):
    HETZNER = "HETZNER"
    DIGITALOCEAN = "DIGITALOCEAN"


@strawberry.enum
class BackupProvider(Enum):
    BACKBLAZE = "BACKBLAZE"
    NONE = "NONE"
    # For testing purposes; make sure these are not selectable in prod.
    MEMORY = "MEMORY"
    FILE = "FILE"
@@ -0,0 +1,18 @@
"""Services status"""
# pylint: disable=too-few-public-methods
import typing
import strawberry

from selfprivacy_api.graphql.common_types.service import (
    Service,
    service_to_graphql_service,
)
from selfprivacy_api.services import get_all_services


@strawberry.type
class Services:
    @strawberry.field
    def all_services(self) -> typing.List[Service]:
        services = get_all_services()
        return [service_to_graphql_service(service) for service in services]
@@ -0,0 +1,33 @@
"""Storage queries."""
# pylint: disable=too-few-public-methods
import typing
import strawberry

from selfprivacy_api.graphql.common_types.service import (
    StorageVolume,
)
from selfprivacy_api.utils.block_devices import BlockDevices


@strawberry.type
class Storage:
    """GraphQL queries to get storage information."""

    @strawberry.field
    def volumes(self) -> typing.List[StorageVolume]:
        """Get list of volumes"""
        return [
            StorageVolume(
                total_space=str(volume.fssize)
                if volume.fssize is not None
                else str(volume.size),
                free_space=str(volume.fsavail),
                used_space=str(volume.fsused),
                root=volume.is_root(),
                name=volume.name,
                model=volume.model,
                serial=volume.serial,
                type=volume.type,
            )
            for volume in BlockDevices().get_block_devices()
        ]
@@ -0,0 +1,171 @@
"""Common system information and settings"""
# pylint: disable=too-few-public-methods
import os
import typing
import strawberry
from selfprivacy_api.graphql.common_types.dns import DnsRecord

from selfprivacy_api.graphql.queries.common import Alert, Severity
from selfprivacy_api.graphql.queries.providers import DnsProvider, ServerProvider
from selfprivacy_api.jobs import Jobs
from selfprivacy_api.jobs.migrate_to_binds import is_bind_migrated
from selfprivacy_api.services import get_all_required_dns_records
from selfprivacy_api.utils import ReadUserData
import selfprivacy_api.actions.system as system_actions
import selfprivacy_api.actions.ssh as ssh_actions


@strawberry.type
class SystemDomainInfo:
    """Information about the system domain"""

    domain: str
    hostname: str
    provider: DnsProvider

    @strawberry.field
    def required_dns_records(self) -> typing.List[DnsRecord]:
        """Collect all required DNS records for all services"""
        return [
            DnsRecord(
                record_type=record.type,
                name=record.name,
                content=record.content,
                ttl=record.ttl,
                priority=record.priority,
                display_name=record.display_name,
            )
            for record in get_all_required_dns_records()
        ]


def get_system_domain_info() -> SystemDomainInfo:
    """Get basic system domain info"""
    with ReadUserData() as user_data:
        return SystemDomainInfo(
            domain=user_data["domain"],
            hostname=user_data["hostname"],
            provider=user_data["dns"]["provider"],
        )


@strawberry.type
class AutoUpgradeOptions:
    """Automatic upgrade options"""

    enable: bool
    allow_reboot: bool


def get_auto_upgrade_options() -> AutoUpgradeOptions:
    """Get automatic upgrade options"""
    settings = system_actions.get_auto_upgrade_settings()
    return AutoUpgradeOptions(
        enable=settings.enable,
        allow_reboot=settings.allowReboot,
    )


@strawberry.type
class SshSettings:
    """SSH settings and root SSH keys"""

    enable: bool
    password_authentication: bool
    root_ssh_keys: typing.List[str]


def get_ssh_settings() -> SshSettings:
    """Get SSH settings"""
    settings = ssh_actions.get_ssh_settings()
    return SshSettings(
        enable=settings.enable,
        password_authentication=settings.passwordAuthentication,
        root_ssh_keys=settings.rootKeys,
    )


def get_system_timezone() -> str:
    """Get system timezone"""
    return system_actions.get_timezone()


@strawberry.type
class SystemSettings:
    """Common system settings"""

    auto_upgrade: AutoUpgradeOptions = strawberry.field(
        resolver=get_auto_upgrade_options
    )
    ssh: SshSettings = strawberry.field(resolver=get_ssh_settings)
    timezone: str = strawberry.field(resolver=get_system_timezone)


def get_system_version() -> str:
    """Get system version"""
    return system_actions.get_system_version()


def get_python_version() -> str:
    """Get Python version"""
    return system_actions.get_python_version()


@strawberry.type
class SystemInfo:
    """System components versions"""

    system_version: str = strawberry.field(resolver=get_system_version)
    python_version: str = strawberry.field(resolver=get_python_version)

    @strawberry.field
    def using_binds(self) -> bool:
        """Check if the system is using bind mounts for service data"""
        return is_bind_migrated()


@strawberry.type
class SystemProviderInfo:
    """Information about the VPS/dedicated server provider"""

    provider: ServerProvider
    id: str


def get_system_provider_info() -> SystemProviderInfo:
    """Get system provider info"""
    with ReadUserData() as user_data:
        return SystemProviderInfo(
            provider=user_data["server"]["provider"],
            id="UNKNOWN",
        )


@strawberry.type
class System:
    """
    Base system type which represents common system status
    """

    status: Alert = strawberry.field(
        resolver=lambda: Alert(
            severity=Severity.INFO,
            title="Test message",
            message="Test message",
            timestamp=None,
        )
    )
    domain_info: SystemDomainInfo = strawberry.field(resolver=get_system_domain_info)
    settings: SystemSettings = SystemSettings()
    info: SystemInfo = SystemInfo()
    provider: SystemProviderInfo = strawberry.field(resolver=get_system_provider_info)

    @strawberry.field
    def busy(self) -> bool:
        """Check if the system is busy"""
        return Jobs.is_busy()

    @strawberry.field
    def working_directory(self) -> str:
        """Get working directory"""
        return os.getcwd()
@@ -0,0 +1,23 @@
"""Users"""
# pylint: disable=too-few-public-methods
import typing
import strawberry

from selfprivacy_api.graphql.common_types.user import (
    User,
    get_user_by_username,
    get_users,
)
from selfprivacy_api.graphql import IsAuthenticated


@strawberry.type
class Users:
    @strawberry.field(permission_classes=[IsAuthenticated])
    def get_user(self, username: str) -> typing.Optional[User]:
        """Get a user by username"""
        return get_user_by_username(username)

    all_users: typing.List[User] = strawberry.field(
        permission_classes=[IsAuthenticated], resolver=get_users
    )
@@ -0,0 +1,150 @@
"""GraphQL API for SelfPrivacy."""
# pylint: disable=too-few-public-methods

import asyncio
from typing import AsyncGenerator
import strawberry
from selfprivacy_api.graphql import IsAuthenticated
from selfprivacy_api.graphql.mutations.deprecated_mutations import (
    DeprecatedApiMutations,
    DeprecatedJobMutations,
    DeprecatedServicesMutations,
    DeprecatedStorageMutations,
    DeprecatedSystemMutations,
    DeprecatedUsersMutations,
)
from selfprivacy_api.graphql.mutations.api_mutations import ApiMutations
from selfprivacy_api.graphql.mutations.job_mutations import JobMutations
from selfprivacy_api.graphql.mutations.mutation_interface import GenericMutationReturn
from selfprivacy_api.graphql.mutations.services_mutations import ServicesMutations
from selfprivacy_api.graphql.mutations.storage_mutations import StorageMutations
from selfprivacy_api.graphql.mutations.system_mutations import SystemMutations
from selfprivacy_api.graphql.mutations.backup_mutations import BackupMutations

from selfprivacy_api.graphql.queries.api_queries import Api
from selfprivacy_api.graphql.queries.backup import Backup
from selfprivacy_api.graphql.queries.jobs import Job
from selfprivacy_api.graphql.queries.services import Services
from selfprivacy_api.graphql.queries.storage import Storage
from selfprivacy_api.graphql.queries.system import System

from selfprivacy_api.graphql.mutations.users_mutations import UsersMutations
from selfprivacy_api.graphql.queries.users import Users
from selfprivacy_api.jobs.test import test_job


@strawberry.type
class Query:
    """Root schema for queries"""

    @strawberry.field
    def api(self) -> Api:
        """API access status"""
        return Api()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def system(self) -> System:
        """System queries"""
        return System()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def users(self) -> Users:
        """Users queries"""
        return Users()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def storage(self) -> Storage:
        """Storage queries"""
        return Storage()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def jobs(self) -> Job:
        """Jobs queries"""
        return Job()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def services(self) -> Services:
        """Services queries"""
        return Services()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def backup(self) -> Backup:
        """Backup queries"""
        return Backup()


@strawberry.type
class Mutation(
    DeprecatedApiMutations,
    DeprecatedSystemMutations,
    DeprecatedUsersMutations,
    DeprecatedStorageMutations,
    DeprecatedServicesMutations,
    DeprecatedJobMutations,
):
    """Root schema for mutations"""

    @strawberry.field
    def api(self) -> ApiMutations:
        """API mutations"""
        return ApiMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def system(self) -> SystemMutations:
        """System mutations"""
        return SystemMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def users(self) -> UsersMutations:
        """Users mutations"""
        return UsersMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def storage(self) -> StorageMutations:
        """Storage mutations"""
        return StorageMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def services(self) -> ServicesMutations:
        """Services mutations"""
        return ServicesMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def jobs(self) -> JobMutations:
        """Jobs mutations"""
        return JobMutations()

    @strawberry.field(permission_classes=[IsAuthenticated])
    def backup(self) -> BackupMutations:
        """Backup mutations"""
        return BackupMutations()

    @strawberry.mutation(permission_classes=[IsAuthenticated])
    def test_mutation(self) -> GenericMutationReturn:
        """Test mutation"""
        test_job()
        return GenericMutationReturn(
            success=True,
            message="Test mutation",
            code=200,
        )


@strawberry.type
class Subscription:
    """Root schema for subscriptions"""

    @strawberry.subscription(permission_classes=[IsAuthenticated])
    async def count(self, target: int = 100) -> AsyncGenerator[int, None]:
        for i in range(target):
            yield i
            await asyncio.sleep(0.5)


schema = strawberry.Schema(
    query=Query,
    mutation=Mutation,
    subscription=Subscription,
)
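For reference, a minimal sketch of how this schema can be exercised directly from Python with Strawberry's synchronous executor. The query string and the context object are illustrative assumptions; in the real API, the context comes from the web framework integration and carries the authentication state that IsAuthenticated inspects:

    # Hedged smoke-test sketch, not part of the API itself.
    # Field names follow Strawberry's default camelCase conversion.
    result = schema.execute_sync(
        "query { system { busy } }",
        context_value={"auth": "..."},  # placeholder; a real request context is needed
    )
    print(result.errors or result.data)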
@@ -0,0 +1,323 @@
"""
Jobs controller. It handles the jobs that are created by the user.
This is a singleton class holding the jobs list.
Jobs can be added and removed.
A single job can be updated.
A job is a dictionary with the following keys:
    - id: unique identifier of the job
    - name: name of the job
    - description: description of the job
    - status: status of the job
    - created_at: date of creation of the job, naive localtime
    - updated_at: date of last update of the job, naive localtime
    - finished_at: date of finish of the job
    - error: error message if the job failed
    - result: result of the job
"""
import typing
import datetime
from uuid import UUID
import uuid
from enum import Enum

from pydantic import BaseModel

from selfprivacy_api.utils.redis_pool import RedisPool

JOB_EXPIRATION_SECONDS = 10 * 24 * 60 * 60  # ten days

STATUS_LOGS_PREFIX = "jobs_logs:status:"
PROGRESS_LOGS_PREFIX = "jobs_logs:progress:"


class JobStatus(str, Enum):
    """
    Status of a job.
    """

    CREATED = "CREATED"
    RUNNING = "RUNNING"
    FINISHED = "FINISHED"
    ERROR = "ERROR"


class Job(BaseModel):
    """
    Job class.
    """

    uid: UUID
    type_id: str
    name: str
    description: str
    status: JobStatus
    status_text: typing.Optional[str]
    progress: typing.Optional[int]
    created_at: datetime.datetime
    updated_at: datetime.datetime
    finished_at: typing.Optional[datetime.datetime]
    error: typing.Optional[str]
    result: typing.Optional[str]


class Jobs:
    """
    Jobs class.
    """

    @staticmethod
    def reset() -> None:
        """
        Reset the jobs list.
        """
        jobs = Jobs.get_jobs()
        for job in jobs:
            Jobs.remove(job)
        Jobs.reset_logs()

    @staticmethod
    def add(
        name: str,
        type_id: str,
        description: str,
        status: JobStatus = JobStatus.CREATED,
        status_text: str = "",
        progress: int = 0,
    ) -> Job:
        """
        Add a job to the jobs list.
        """
        job = Job(
            uid=uuid.uuid4(),
            name=name,
            type_id=type_id,
            description=description,
            status=status,
            status_text=status_text,
            progress=progress,
            created_at=datetime.datetime.now(),
            updated_at=datetime.datetime.now(),
            finished_at=None,
            error=None,
            result=None,
        )
        redis = RedisPool().get_connection()
        _store_job_as_hash(redis, _redis_key_from_uuid(job.uid), job)
        return job

    @staticmethod
    def remove(job: Job) -> None:
        """
        Remove a job from the jobs list.
        """
        Jobs.remove_by_uid(str(job.uid))

    @staticmethod
    def remove_by_uid(job_uuid: str) -> bool:
        """
        Remove a job from the jobs list.
        """
        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(job_uuid)
        if redis.exists(key):
            redis.delete(key)
            return True
        return False

    @staticmethod
    def reset_logs() -> None:
        redis = RedisPool().get_connection()
        for key in redis.keys(STATUS_LOGS_PREFIX + "*"):
            redis.delete(key)

    @staticmethod
    def log_status_update(job: Job, status: JobStatus) -> None:
        redis = RedisPool().get_connection()
        key = _status_log_key_from_uuid(job.uid)
        redis.lpush(key, status.value)
        redis.expire(key, 10)

    @staticmethod
    def log_progress_update(job: Job, progress: int) -> None:
        redis = RedisPool().get_connection()
        key = _progress_log_key_from_uuid(job.uid)
        redis.lpush(key, progress)
        redis.expire(key, 10)

    @staticmethod
    def status_updates(job: Job) -> list[JobStatus]:
        result: list[JobStatus] = []

        redis = RedisPool().get_connection()
        key = _status_log_key_from_uuid(job.uid)
        if not redis.exists(key):
            return []

        status_strings: list[str] = redis.lrange(key, 0, -1)  # type: ignore
        for status in status_strings:
            try:
                result.append(JobStatus[status])
            except KeyError as error:
                raise ValueError("impossible job status: " + status) from error
        return result

    @staticmethod
    def progress_updates(job: Job) -> list[int]:
        result: list[int] = []

        redis = RedisPool().get_connection()
        key = _progress_log_key_from_uuid(job.uid)
        if not redis.exists(key):
            return []

        progress_strings: list[str] = redis.lrange(key, 0, -1)  # type: ignore
        for progress in progress_strings:
            try:
                result.append(int(progress))
            # int() raises ValueError on garbage, not KeyError
            except ValueError as error:
                raise ValueError("impossible job progress: " + progress) from error
        return result

    @staticmethod
    def update(
        job: Job,
        status: JobStatus,
        status_text: typing.Optional[str] = None,
        progress: typing.Optional[int] = None,
        name: typing.Optional[str] = None,
        description: typing.Optional[str] = None,
        error: typing.Optional[str] = None,
        result: typing.Optional[str] = None,
    ) -> Job:
        """
        Update a job in the jobs list.
        """
        if name is not None:
            job.name = name
        if description is not None:
            job.description = description
        if status_text is not None:
            job.status_text = status_text

        # A finished job is at 100% progress,
        # unless the caller says otherwise
        if status == JobStatus.FINISHED and progress is None:
            progress = 100
        if progress is not None and job.progress != progress:
            job.progress = progress
            Jobs.log_progress_update(job, progress)

        job.status = status
        Jobs.log_status_update(job, status)
        job.updated_at = datetime.datetime.now()
        job.error = error
        job.result = result
        if status in (JobStatus.FINISHED, JobStatus.ERROR):
            job.finished_at = datetime.datetime.now()

        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(job.uid)
        if redis.exists(key):
            _store_job_as_hash(redis, key, job)
            if status in (JobStatus.FINISHED, JobStatus.ERROR):
                redis.expire(key, JOB_EXPIRATION_SECONDS)

        return job

    @staticmethod
    def set_expiration(job: Job, expiration_seconds: int) -> Job:
        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(job.uid)
        if redis.exists(key):
            redis.expire(key, expiration_seconds)
        return job

    @staticmethod
    def get_job(uid: str) -> typing.Optional[Job]:
        """
        Get a job from the jobs list.
        """
        redis = RedisPool().get_connection()
        key = _redis_key_from_uuid(uid)
        if redis.exists(key):
            return _job_from_hash(redis, key)
        return None

    @staticmethod
    def get_jobs() -> typing.List[Job]:
        """
        Get the jobs list.
        """
        redis = RedisPool().get_connection()
        job_keys = redis.keys("jobs:*")
        jobs = []
        for job_key in job_keys:
            job = _job_from_hash(redis, job_key)
            if job is not None:
                jobs.append(job)
        return jobs

    @staticmethod
    def is_busy() -> bool:
        """
        Check if there is a job running.
        """
        for job in Jobs.get_jobs():
            if job.status == JobStatus.RUNNING:
                return True
        return False


def report_progress(progress: int, job: Job, status_text: str) -> None:
    """
    A terse way to call a common operation, for readability.
    job.report_progress() would be even better,
    but it would go against how this file is written.
    """
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        status_text=status_text,
        progress=progress,
    )


def _redis_key_from_uuid(uuid_string) -> str:
    return "jobs:" + str(uuid_string)


def _status_log_key_from_uuid(uuid_string) -> str:
    return STATUS_LOGS_PREFIX + str(uuid_string)


def _progress_log_key_from_uuid(uuid_string) -> str:
    return PROGRESS_LOGS_PREFIX + str(uuid_string)


def _store_job_as_hash(redis, redis_key, model) -> None:
    for key, value in model.dict().items():
        if isinstance(value, uuid.UUID):
            value = str(value)
        if isinstance(value, datetime.datetime):
            value = value.isoformat()
        if isinstance(value, JobStatus):
            value = value.value
        redis.hset(redis_key, key, str(value))


def _job_from_hash(redis, redis_key) -> typing.Optional[Job]:
    if redis.exists(redis_key):
        job_dict = redis.hgetall(redis_key)
        for date in [
            "created_at",
            "updated_at",
            "finished_at",
        ]:
            if job_dict[date] != "None":
                job_dict[date] = datetime.datetime.fromisoformat(job_dict[date])
        for key in job_dict.keys():
            if job_dict[key] == "None":
                job_dict[key] = None

        return Job(**job_dict)
    return None
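A minimal sketch of the intended lifecycle of a job with this controller. The type_id and texts are illustrative, and Redis must be reachable through RedisPool:

    # Hedged usage sketch; "example.long_task" is not a real job type.
    job = Jobs.add(
        type_id="example.long_task",
        name="Example task",
        description="Demonstrates the add/update lifecycle.",
    )
    Jobs.update(job=job, status=JobStatus.RUNNING, progress=50, status_text="Halfway")
    Jobs.update(job=job, status=JobStatus.FINISHED, result="Done")
    assert Jobs.get_job(str(job.uid)).status == JobStatus.FINISHED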
@@ -0,0 +1,329 @@
"""Function to perform migration of app data to binds."""
import subprocess
import pathlib
import shutil

from pydantic import BaseModel
from selfprivacy_api.jobs import Job, JobStatus, Jobs
from selfprivacy_api.services.bitwarden import Bitwarden
from selfprivacy_api.services.gitea import Gitea
from selfprivacy_api.services.mailserver import MailServer
from selfprivacy_api.services.nextcloud import Nextcloud
from selfprivacy_api.services.pleroma import Pleroma
from selfprivacy_api.utils import ReadUserData, WriteUserData
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.utils.block_devices import BlockDevices


class BindMigrationConfig(BaseModel):
    """Config for bind migration.
    For each service, provide the block device name.
    """

    email_block_device: str
    bitwarden_block_device: str
    gitea_block_device: str
    nextcloud_block_device: str
    pleroma_block_device: str


def is_bind_migrated() -> bool:
    """Check if bind migration was performed."""
    with ReadUserData() as user_data:
        return user_data.get("useBinds", False)


def activate_binds(config: BindMigrationConfig):
    """Activate binds."""
    # Activate binds in userdata
    with WriteUserData() as user_data:
        if "email" not in user_data:
            user_data["email"] = {}
        user_data["email"]["location"] = config.email_block_device
        if "bitwarden" not in user_data:
            user_data["bitwarden"] = {}
        user_data["bitwarden"]["location"] = config.bitwarden_block_device
        if "gitea" not in user_data:
            user_data["gitea"] = {}
        user_data["gitea"]["location"] = config.gitea_block_device
        if "nextcloud" not in user_data:
            user_data["nextcloud"] = {}
        user_data["nextcloud"]["location"] = config.nextcloud_block_device
        if "pleroma" not in user_data:
            user_data["pleroma"] = {}
        user_data["pleroma"]["location"] = config.pleroma_block_device

        user_data["useBinds"] = True


def move_folder(
    data_path: pathlib.Path, bind_path: pathlib.Path, user: str, group: str
):
    """Move a folder from data to bind and bind-mount it back."""
    if not data_path.exists():
        return
    shutil.move(str(data_path), str(bind_path))

    try:
        data_path.mkdir(mode=0o750, parents=True, exist_ok=True)
    except Exception as error:
        print(f"Error creating data path: {error}")
        return

    try:
        shutil.chown(str(bind_path), user=user, group=group)
        shutil.chown(str(data_path), user=user, group=group)
    except LookupError:
        pass

    try:
        subprocess.run(["mount", "--bind", str(bind_path), str(data_path)], check=True)
    except subprocess.CalledProcessError as error:
        print(error)

    try:
        subprocess.run(["chown", "-R", f"{user}:{group}", str(data_path)], check=True)
    except subprocess.CalledProcessError as error:
        print(error)


@huey.task()
def migrate_to_binds(config: BindMigrationConfig, job: Job):
    """Migrate app data to binds."""

    # Exit if migration is already done
    if is_bind_migrated():
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
            error="Migration already done.",
        )
        return

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=0,
        status_text="Checking if all volumes are available.",
    )
    # Get block devices.
    block_devices = BlockDevices().get_block_devices()
    block_device_names = [device.name for device in block_devices]

    # Get all unique required block devices
    required_block_devices = []
    for block_device_name in config.__dict__.values():
        if block_device_name not in required_block_devices:
            required_block_devices.append(block_device_name)

    # Check if all block devices from the config are present.
    for block_device_name in required_block_devices:
        if block_device_name not in block_device_names:
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error=f"Block device {block_device_name} not found.",
            )
            return

    # Make sure all required block devices are mounted.
    # sda1 is the root partition and is always mounted.
    for block_device_name in required_block_devices:
        if block_device_name == "sda1":
            continue
        block_device = BlockDevices().get_block_device(block_device_name)
        if block_device is None:
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error=f"Block device {block_device_name} not found.",
            )
            return
        if f"/volumes/{block_device_name}" not in block_device.mountpoints:
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error=f"Block device {block_device_name} not mounted.",
            )
            return

    # Make sure /volumes/sda1 exists.
    pathlib.Path("/volumes/sda1").mkdir(parents=True, exist_ok=True)

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=5,
        status_text="Activating binds in NixOS config.",
    )

    activate_binds(config)

    # Perform migration of Nextcloud.
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=10,
        status_text="Migrating Nextcloud.",
    )

    Nextcloud().stop()

    # If /volumes/sda1/nextcloud or /volumes/sdb/nextcloud exists, skip it.
    if not pathlib.Path("/volumes/sda1/nextcloud").exists():
        if not pathlib.Path("/volumes/sdb/nextcloud").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/nextcloud"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.nextcloud_block_device}/nextcloud"
                ),
                user="nextcloud",
                group="nextcloud",
            )

    # Start Nextcloud
    Nextcloud().start()

    # Perform migration of Bitwarden

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=28,
        status_text="Migrating Bitwarden.",
    )

    Bitwarden().stop()

    if not pathlib.Path("/volumes/sda1/bitwarden").exists():
        if not pathlib.Path("/volumes/sdb/bitwarden").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/bitwarden"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.bitwarden_block_device}/bitwarden"
                ),
                user="vaultwarden",
                group="vaultwarden",
            )

    if not pathlib.Path("/volumes/sda1/bitwarden_rs").exists():
        if not pathlib.Path("/volumes/sdb/bitwarden_rs").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/bitwarden_rs"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.bitwarden_block_device}/bitwarden_rs"
                ),
                user="vaultwarden",
                group="vaultwarden",
            )

    # Start Bitwarden
    Bitwarden().start()

    # Perform migration of Gitea

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=46,
        status_text="Migrating Gitea.",
    )

    Gitea().stop()

    if not pathlib.Path("/volumes/sda1/gitea").exists():
        if not pathlib.Path("/volumes/sdb/gitea").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/gitea"),
                bind_path=pathlib.Path(f"/volumes/{config.gitea_block_device}/gitea"),
                user="gitea",
                group="gitea",
            )

    Gitea().start()

    # Perform migration of Mail server

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=64,
        status_text="Migrating Mail server.",
    )

    MailServer().stop()

    if not pathlib.Path("/volumes/sda1/vmail").exists():
        if not pathlib.Path("/volumes/sdb/vmail").exists():
            move_folder(
                data_path=pathlib.Path("/var/vmail"),
                bind_path=pathlib.Path(f"/volumes/{config.email_block_device}/vmail"),
                user="virtualMail",
                group="virtualMail",
            )

    if not pathlib.Path("/volumes/sda1/sieve").exists():
        if not pathlib.Path("/volumes/sdb/sieve").exists():
            move_folder(
                data_path=pathlib.Path("/var/sieve"),
                bind_path=pathlib.Path(f"/volumes/{config.email_block_device}/sieve"),
                user="virtualMail",
                group="virtualMail",
            )

    MailServer().start()

    # Perform migration of Pleroma

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=82,
        status_text="Migrating Pleroma.",
    )

    Pleroma().stop()

    if not pathlib.Path("/volumes/sda1/pleroma").exists():
        if not pathlib.Path("/volumes/sdb/pleroma").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/pleroma"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.pleroma_block_device}/pleroma"
                ),
                user="pleroma",
                group="pleroma",
            )

    if not pathlib.Path("/volumes/sda1/postgresql").exists():
        if not pathlib.Path("/volumes/sdb/postgresql").exists():
            move_folder(
                data_path=pathlib.Path("/var/lib/postgresql"),
                bind_path=pathlib.Path(
                    f"/volumes/{config.pleroma_block_device}/postgresql"
                ),
                user="postgres",
                group="postgres",
            )

    Pleroma().start()

    Jobs.update(
        job=job,
        status=JobStatus.FINISHED,
        progress=100,
        status_text="Migration finished.",
        result="Migration finished.",
    )


def start_bind_migration(config: BindMigrationConfig) -> Job:
    """Start migration."""
    job = Jobs.add(
        type_id="migrations.migrate_to_binds",
        name="Migrate to binds",
        description="Migration required to use the new disk space management.",
    )
    migrate_to_binds(config, job)
    return job
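A hedged sketch of how this migration is kicked off; the device names below are placeholders for whatever BlockDevices reports on the actual server:

    # Illustrative only: device names depend on the actual server layout.
    config = BindMigrationConfig(
        email_block_device="sdb",
        bitwarden_block_device="sdb",
        gitea_block_device="sdb",
        nextcloud_block_device="sdb",
        pleroma_block_device="sdb",
    )
    job = start_bind_migration(config)  # enqueues migrate_to_binds via huey
    print(job.uid, job.status)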
@@ -0,0 +1,147 @@
import re
import subprocess
from typing import Tuple, Iterable

from selfprivacy_api.utils.huey import huey

from selfprivacy_api.jobs import JobStatus, Jobs, Job


class ShellException(Exception):
    """Shell-related errors"""


COMPLETED_WITH_ERROR = "Error occurred, please report this to the support chat."
RESULT_WAS_NOT_FOUND_ERROR = (
    "We are sorry, garbage collection result was not found. "
    "Something went wrong, please report this to the support chat."
)
CLEAR_COMPLETED = "Garbage collection completed."


def delete_old_gens_and_return_dead_report() -> str:
    # Note: "--delete-generations" and its argument must be separate argv items,
    # otherwise nix-env receives one unrecognized token.
    subprocess.run(
        [
            "nix-env",
            "-p",
            "/nix/var/nix/profiles/system",
            "--delete-generations",
            "old",
        ],
        check=False,
    )

    result = subprocess.check_output(["nix-store", "--gc", "--print-dead"]).decode(
        "utf-8"
    )

    return " " if result is None else result


def run_nix_collect_garbage() -> Iterable[bytes]:
    process = subprocess.Popen(
        ["nix-store", "--gc"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    return process.stdout if process.stdout else iter([])


def parse_line(job: Job, line: str) -> Job:
    """
    Parse the string for the presence of the final line
    with the total amount of space freed.
    Simply put, we are looking for a line like:
    "1537 store paths deleted, 339.84 MiB freed".
    """
    pattern = re.compile(r"[+-]?\d+\.\d+ \w+(?= freed)")
    match = re.search(pattern, line)

    if match is None:
        raise ShellException("nix returned gibberish output")

    else:
        Jobs.update(
            job=job,
            status=JobStatus.FINISHED,
            status_text=CLEAR_COMPLETED,
            result=f"{match.group(0)} have been cleared",
        )
    return job


def process_stream(job: Job, stream: Iterable[bytes], total_dead_packages: int) -> None:
    completed_packages = 0
    prev_progress = 0

    for line in stream:
        line = line.decode("utf-8")

        if "deleting '/nix/store/" in line:
            completed_packages += 1
            percent = int((completed_packages / total_dead_packages) * 100)

            # Update the job at most once per 5% of progress
            if percent - prev_progress >= 5:
                Jobs.update(
                    job=job,
                    status=JobStatus.RUNNING,
                    progress=percent,
                    status_text="Cleaning...",
                )
                prev_progress = percent

        elif "store paths deleted," in line:
            parse_line(job, line)


def get_dead_packages(output) -> Tuple[int, float]:
    dead = len(re.findall("/nix/store/", output))
    percent = 0
    if dead != 0:
        percent = 100 / dead
    return dead, percent


@huey.task()
def calculate_and_clear_dead_paths(job: Job):
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=0,
        status_text="Calculating the number of dead packages...",
    )

    dead_packages, package_equal_to_percent = get_dead_packages(
        delete_old_gens_and_return_dead_report()
    )

    if dead_packages == 0:
        Jobs.update(
            job=job,
            status=JobStatus.FINISHED,
            status_text="Nothing to clear",
            result="System is clear",
        )
        return True

    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        progress=0,
        status_text=f"Found {dead_packages} packages to remove!",
    )

    stream = run_nix_collect_garbage()
    try:
        process_stream(job, stream, dead_packages)
    except ShellException as error:
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
            status_text=COMPLETED_WITH_ERROR,
            error=RESULT_WAS_NOT_FOUND_ERROR,
        )


def start_nix_collect_garbage() -> Job:
    job = Jobs.add(
        type_id="maintenance.collect_nix_garbage",
        name="Collect garbage",
        description="Cleaning up unused packages",
    )

    calculate_and_clear_dead_paths(job=job)

    return job
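The regular expression in parse_line can be sanity-checked against the sample line from its docstring; a quick sketch:

    # Quick check of the pattern against the documented sample line.
    import re

    sample = "1537 store paths deleted, 339.84 MiB freed"
    pattern = re.compile(r"[+-]?\d+\.\d+ \w+(?= freed)")
    match = re.search(pattern, sample)
    assert match is not None and match.group(0) == "339.84 MiB"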
@@ -0,0 +1,57 @@
import time
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.jobs import JobStatus, Jobs


@huey.task()
def test_job():
    job = Jobs.add(
        type_id="test",
        name="Test job",
        description="This is a test job.",
        status=JobStatus.CREATED,
        status_text="",
        progress=0,
    )
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        status_text="Performing pre-move checks...",
        progress=5,
    )
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        status_text="Performing pre-move checks...",
        progress=10,
    )
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        status_text="Performing pre-move checks...",
        progress=15,
    )
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        status_text="Performing pre-move checks...",
        progress=20,
    )
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.RUNNING,
        status_text="Performing pre-move checks...",
        progress=25,
    )
    time.sleep(5)
    Jobs.update(
        job=job,
        status=JobStatus.FINISHED,
        status_text="Job finished.",
        progress=100,
    )
@@ -0,0 +1,136 @@
"""
A task to start the system upgrade or rebuild by starting a systemd unit.
After starting, track the status of the systemd unit and update the Job
status accordingly.
"""
import subprocess
from selfprivacy_api.utils.huey import huey
from selfprivacy_api.jobs import JobStatus, Jobs, Job
from selfprivacy_api.utils.waitloop import wait_until_true
from selfprivacy_api.utils.systemd import (
    get_service_status,
    get_last_log_lines,
    ServiceStatus,
)

START_TIMEOUT = 60 * 5
START_INTERVAL = 1
RUN_TIMEOUT = 60 * 60
RUN_INTERVAL = 5


def check_if_started(unit_name: str):
    """Check if the systemd unit has started"""
    try:
        status = get_service_status(unit_name)
        return status == ServiceStatus.ACTIVE
    except subprocess.CalledProcessError:
        return False


def check_running_status(job: Job, unit_name: str):
    """Check if the systemd unit is running"""
    try:
        status = get_service_status(unit_name)
        if status == ServiceStatus.INACTIVE:
            Jobs.update(
                job=job,
                status=JobStatus.FINISHED,
                result="System rebuilt.",
                progress=100,
            )
            return True
        if status == ServiceStatus.FAILED:
            log_lines = get_last_log_lines(unit_name, 10)
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error="System rebuild failed. Last log lines:\n" + "\n".join(log_lines),
            )
            return True
        if status == ServiceStatus.ACTIVE:
            log_lines = get_last_log_lines(unit_name, 1)
            Jobs.update(
                job=job,
                status=JobStatus.RUNNING,
                status_text=log_lines[0] if len(log_lines) > 0 else "",
            )
            return False
        return False
    except subprocess.CalledProcessError:
        return False


def rebuild_system(job: Job, upgrade: bool = False):
    """
    Broken out to allow calling it synchronously.
    We cannot just block until the task is done, because that would require
    a second worker, which we do not have.
    """

    unit_name = "sp-nixos-upgrade.service" if upgrade else "sp-nixos-rebuild.service"
    try:
        command = ["systemctl", "start", unit_name]
        subprocess.run(
            command,
            check=True,
            start_new_session=True,
            shell=False,
        )
        Jobs.update(
            job=job,
            status=JobStatus.RUNNING,
            status_text="Starting the system rebuild...",
        )
        # Wait for the systemd unit to start
        try:
            wait_until_true(
                lambda: check_if_started(unit_name),
                timeout_sec=START_TIMEOUT,
                interval=START_INTERVAL,
            )
        except TimeoutError:
            log_lines = get_last_log_lines(unit_name, 10)
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error="System rebuild timed out. Last log lines:\n"
                + "\n".join(log_lines),
            )
            return
        Jobs.update(
            job=job,
            status=JobStatus.RUNNING,
            status_text="Rebuilding the system...",
        )
        # Wait for the systemd unit to finish
        try:
            wait_until_true(
                lambda: check_running_status(job, unit_name),
                timeout_sec=RUN_TIMEOUT,
                interval=RUN_INTERVAL,
            )
        except TimeoutError:
            log_lines = get_last_log_lines(unit_name, 10)
            Jobs.update(
                job=job,
                status=JobStatus.ERROR,
                error="System rebuild timed out. Last log lines:\n"
                + "\n".join(log_lines),
            )
            return

    except subprocess.CalledProcessError as e:
        Jobs.update(
            job=job,
            status=JobStatus.ERROR,
            status_text=str(e),
        )


@huey.task()
def rebuild_system_task(job: Job, upgrade: bool = False):
    """Rebuild the system"""
    rebuild_system(job, upgrade)
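wait_until_true is imported from selfprivacy_api.utils.waitloop, whose body is not shown in this diff. A minimal sketch of what such a helper might look like, inferred purely from the call sites above (signature and behavior are assumptions, not the actual waitloop source):

    # Assumed implementation, reconstructed from the call sites only.
    import time
    from typing import Callable

    def wait_until_true(
        predicate: Callable[[], bool], timeout_sec: int, interval: float = 0.1
    ) -> None:
        deadline = time.monotonic() + timeout_sec
        while time.monotonic() < deadline:
            if predicate():
                return
            time.sleep(interval)
        raise TimeoutError()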
@@ -0,0 +1,48 @@
"""Migrations module.
The migrations module was introduced in v1.1.1 and provides one-shot
migrations which cannot be performed through NixOS configuration file changes.
These migrations are checked and run before every start of the API.

You can disable certain migrations if needed by creating an array
at api.skippedMigrations in userdata.json and populating it
with IDs of the migrations to skip.
Adding DISABLE_ALL to that array disables the migrations module entirely.
"""

from selfprivacy_api.utils import ReadUserData, UserDataFiles
from selfprivacy_api.migrations.write_token_to_redis import WriteTokenToRedis
from selfprivacy_api.migrations.check_for_system_rebuild_jobs import (
    CheckForSystemRebuildJobs,
)

migrations = [
    WriteTokenToRedis(),
    CheckForSystemRebuildJobs(),
]


def run_migrations():
    """
    Go over all migrations. If they are not skipped in the userdata file,
    run them if the migration is needed.
    """
    with ReadUserData(UserDataFiles.SECRETS) as data:
        if "api" not in data:
            skipped_migrations = []
        elif "skippedMigrations" not in data["api"]:
            skipped_migrations = []
        else:
            skipped_migrations = data["api"].get("skippedMigrations", [])

    if "DISABLE_ALL" in skipped_migrations:
        return

    for migration in migrations:
        if migration.get_migration_name() not in skipped_migrations:
            try:
                if migration.is_migration_needed():
                    migration.migrate()
            except Exception as err:
                print(f"Error while migrating {migration.get_migration_name()}")
                print(err)
                print("Skipping this migration")
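For example, skipping one migration would look roughly like this fragment of the file run_migrations reads (a hedged sketch; the surrounding file contains many other keys not shown):

    {
        "api": {
            "skippedMigrations": ["write_token_to_redis"]
        }
    }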
@@ -0,0 +1,47 @@
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.jobs import JobStatus, Jobs


class CheckForSystemRebuildJobs(Migration):
    """Check if there are unfinished system rebuild jobs and finish them"""

    def get_migration_name(self):
        return "check_for_system_rebuild_jobs"

    def get_migration_description(self):
        return "Check if there are unfinished system rebuild jobs and finish them"

    def is_migration_needed(self):
        # Check if there are any unfinished system rebuild jobs
        for job in Jobs.get_jobs():
            if (
                job.type_id
                in [
                    "system.nixos.rebuild",
                    "system.nixos.upgrade",
                ]
            ) and job.status in [
                JobStatus.CREATED,
                JobStatus.RUNNING,
            ]:
                return True
        return False

    def migrate(self):
        # As the API is restarted, we assume that the jobs are finished
        for job in Jobs.get_jobs():
            if (
                job.type_id
                in [
                    "system.nixos.rebuild",
                    "system.nixos.upgrade",
                ]
            ) and job.status in [
                JobStatus.CREATED,
                JobStatus.RUNNING,
            ]:
                Jobs.update(
                    job=job,
                    status=JobStatus.FINISHED,
                    result="System rebuilt.",
                    progress=100,
                )
@@ -0,0 +1,28 @@
from abc import ABC, abstractmethod


class Migration(ABC):
    """
    Abstract Migration class.
    This class is used to define the structure of a migration.
    A migration has a function is_migration_needed() that returns True or False,
    a function migrate() that does the migration,
    a function get_migration_name() that returns the migration name,
    and a function get_migration_description() that returns the migration description.
    """

    @abstractmethod
    def get_migration_name(self):
        pass

    @abstractmethod
    def get_migration_description(self):
        pass

    @abstractmethod
    def is_migration_needed(self):
        pass

    @abstractmethod
    def migrate(self):
        pass
@@ -0,0 +1,63 @@
from datetime import datetime
from typing import Optional
from selfprivacy_api.migrations.migration import Migration
from selfprivacy_api.models.tokens.token import Token

from selfprivacy_api.repositories.tokens.redis_tokens_repository import (
    RedisTokensRepository,
)
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
    AbstractTokensRepository,
)
from selfprivacy_api.utils import ReadUserData, UserDataFiles


class WriteTokenToRedis(Migration):
    """Load JSON tokens into Redis"""

    def get_migration_name(self):
        return "write_token_to_redis"

    def get_migration_description(self):
        return "Loads the initial token into redis token storage"

    def is_repo_empty(self, repo: AbstractTokensRepository) -> bool:
        if repo.get_tokens() != []:
            return False
        return True

    def get_token_from_json(self) -> Optional[Token]:
        try:
            with ReadUserData(UserDataFiles.SECRETS) as userdata:
                return Token(
                    token=userdata["api"]["token"],
                    device_name="Initial device",
                    created_at=datetime.now(),
                )
        except Exception as e:
            print(e)
            return None

    def is_migration_needed(self):
        try:
            if self.get_token_from_json() is not None and self.is_repo_empty(
                RedisTokensRepository()
            ):
                return True
        except Exception as e:
            print(e)
        return False

    def migrate(self):
        # Write the initial token from the secrets file into Redis token storage
        try:
            token = self.get_token_from_json()
            if token is None:
                print("No token found in secrets.json")
                return
            RedisTokensRepository()._store_token(token)

            print("Done")
        except Exception as e:
            print(e)
            print("Error migrating access tokens from json to redis")
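get_token_from_json expects the initial token under api.token in the secrets file; the fragment it reads looks roughly like this (a sketch, other keys omitted):

    {
        "api": {
            "token": "<the initial access token>"
        }
    }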
@@ -0,0 +1,11 @@
"""Backup provider model, for storage in Redis."""
from pydantic import BaseModel


class BackupProviderModel(BaseModel):
    kind: str
    login: str
    key: str
    location: str
    repo_id: str  # for app usage, not for us
@@ -0,0 +1,11 @@
import datetime
from pydantic import BaseModel

from selfprivacy_api.graphql.common_types.backup import BackupReason


class Snapshot(BaseModel):
    id: str
    service_name: str
    created_at: datetime.datetime
    reason: BackupReason = BackupReason.EXPLICIT
@ -0,0 +1,24 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel


class ServiceStatus(Enum):
    """Enum for service status"""

    ACTIVE = "ACTIVE"
    RELOADING = "RELOADING"
    INACTIVE = "INACTIVE"
    FAILED = "FAILED"
    ACTIVATING = "ACTIVATING"
    DEACTIVATING = "DEACTIVATING"
    OFF = "OFF"


class ServiceDnsRecord(BaseModel):
    type: str
    name: str
    content: str
    ttl: int
    display_name: str
    priority: Optional[int] = None
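The enum values look like uppercased systemd ActiveState strings, so a unit's state can plausibly be mapped straight through the value constructor. A sketch; the helper name and the import path are assumptions:

from selfprivacy_api.services.service import ServiceStatus  # assumed path


def status_from_systemd(active_state: str) -> ServiceStatus:
    # Enum lookup by value, e.g. "active" -> ServiceStatus.ACTIVE
    return ServiceStatus(active_state.upper())


assert status_from_systemd("active") == ServiceStatus.ACTIVE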
@ -0,0 +1,48 @@
"""
New device key used to obtain access token.
"""
from datetime import datetime, timedelta, timezone
import secrets
from pydantic import BaseModel
from mnemonic import Mnemonic

from selfprivacy_api.models.tokens.time import is_past


class NewDeviceKey(BaseModel):
    """
    New device key used to obtain access token.

    A new device key has a key string, date of creation and date of expiration.
    """

    key: str
    created_at: datetime
    expires_at: datetime

    def is_valid(self) -> bool:
        """
        Check if key is valid.
        """
        if is_past(self.expires_at):
            return False
        return True

    def as_mnemonic(self) -> str:
        """
        Get the key as a mnemonic.
        """
        return Mnemonic(language="english").to_mnemonic(bytes.fromhex(self.key))

    @staticmethod
    def generate() -> "NewDeviceKey":
        """
        Factory to generate a random token.
        """
        creation_date = datetime.now(timezone.utc)
        key = secrets.token_bytes(16).hex()
        return NewDeviceKey(
            key=key,
            created_at=creation_date,
            expires_at=creation_date + timedelta(minutes=10),
        )
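End to end, the key is generated, handed to the new device as a mnemonic phrase, and expires ten minutes later. The import path comes from this diff; the assertions are illustrative:

from selfprivacy_api.models.tokens.new_device_key import NewDeviceKey

key = NewDeviceKey.generate()
phrase = key.as_mnemonic()  # 16 random bytes encode to a 12-word BIP-39 phrase
assert key.is_valid()       # stays valid for 10 minutes after creation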
@ -0,0 +1,61 @@
"""
Recovery key used to obtain access token.

Recovery key has a token string, date of creation, optional date of expiration and optional count of uses left.
"""
from datetime import datetime, timezone
import secrets
from typing import Optional
from pydantic import BaseModel
from mnemonic import Mnemonic

from selfprivacy_api.models.tokens.time import is_past, ensure_timezone


class RecoveryKey(BaseModel):
    """
    Recovery key used to obtain access token.

    Recovery key has a key string, date of creation, optional date of expiration and optional count of uses left.
    """

    key: str
    created_at: datetime
    expires_at: Optional[datetime]
    uses_left: Optional[int]

    def is_valid(self) -> bool:
        """
        Check if the recovery key is valid.
        """
        if self.expires_at is not None and is_past(self.expires_at):
            return False
        if self.uses_left is not None and self.uses_left <= 0:
            return False
        return True

    def as_mnemonic(self) -> str:
        """
        Get the recovery key as a mnemonic.
        """
        return Mnemonic(language="english").to_mnemonic(bytes.fromhex(self.key))

    @staticmethod
    def generate(
        expiration: Optional[datetime],
        uses_left: Optional[int],
    ) -> "RecoveryKey":
        """
        Factory to generate a random token.

        If passed a naive time as expiration, assumes UTC.
        """
        creation_date = datetime.now(timezone.utc)
        if expiration is not None:
            expiration = ensure_timezone(expiration)
        key = secrets.token_bytes(24).hex()
        return RecoveryKey(
            key=key,
            created_at=creation_date,
            expires_at=expiration,
            uses_left=uses_left,
        )
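Both expiry and use count are optional, so a key can be unlimited, time-boxed, count-boxed, or both. A sketch, with the import path taken from this diff:

from datetime import datetime, timedelta, timezone

from selfprivacy_api.models.tokens.recovery_key import RecoveryKey

key = RecoveryKey.generate(
    expiration=datetime.now(timezone.utc) + timedelta(days=1),
    uses_left=3,
)
phrase = key.as_mnemonic()  # 24 random bytes encode to an 18-word BIP-39 phrase
assert key.is_valid()       # not expired and uses_left > 0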
@ -0,0 +1,14 @@
from datetime import datetime, timezone


def is_past(dt: datetime) -> bool:
    # we cannot compare a naive now()
    # to dt which might be tz-aware or unaware
    dt = ensure_timezone(dt)
    return dt < datetime.now(timezone.utc)


def ensure_timezone(dt: datetime) -> datetime:
    if dt.tzinfo is None or dt.tzinfo.utcoffset(None) is None:
        dt = dt.replace(tzinfo=timezone.utc)
    return dt
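The point of ensure_timezone is that comparing a naive datetime with an aware one raises TypeError in Python, so naive inputs are pinned to UTC first. A quick illustration, import path taken from this diff:

from datetime import datetime, timedelta, timezone

from selfprivacy_api.models.tokens.time import is_past

assert is_past(datetime(2000, 1, 1))  # naive input, treated as UTC
assert not is_past(datetime.now(timezone.utc) + timedelta(hours=1))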
@ -0,0 +1,33 @@
"""
Model of the access token.

Access token has a token string, device name and date of creation.
"""
from datetime import datetime
import secrets
from pydantic import BaseModel


class Token(BaseModel):
    """
    Model of the access token.

    Access token has a token string, device name and date of creation.
    """

    token: str
    device_name: str
    created_at: datetime

    @staticmethod
    def generate(device_name: str) -> "Token":
        """
        Factory to generate a random token.
        """
        creation_date = datetime.now()
        token = secrets.token_urlsafe(32)
        return Token(
            token=token,
            device_name=device_name,
            created_at=creation_date,
        )
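Usage is a one-liner; note that, unlike the key models above, created_at here is a naive datetime.now(). The import path is taken from this diff:

from selfprivacy_api.models.tokens.token import Token

token = Token.generate("laptop")
assert token.device_name == "laptop"
# token.token is a 32-byte URL-safe secret, suitable for an Authorization header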
@ -0,0 +1,225 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from datetime import datetime
from typing import Optional
from mnemonic import Mnemonic
from secrets import randbelow
import re

from selfprivacy_api.models.tokens.token import Token
from selfprivacy_api.repositories.tokens.exceptions import (
    TokenNotFound,
    InvalidMnemonic,
    RecoveryKeyNotFound,
    NewDeviceKeyNotFound,
)
from selfprivacy_api.models.tokens.recovery_key import RecoveryKey
from selfprivacy_api.models.tokens.new_device_key import NewDeviceKey


class AbstractTokensRepository(ABC):
    def get_token_by_token_string(self, token_string: str) -> Token:
        """Get the token by token"""
        tokens = self.get_tokens()
        for token in tokens:
            if token.token == token_string:
                return token

        raise TokenNotFound("Token not found!")

    def get_token_by_name(self, token_name: str) -> Token:
        """Get the token by name"""
        tokens = self.get_tokens()
        for token in tokens:
            if token.device_name == token_name:
                return token

        raise TokenNotFound("Token not found!")

    @abstractmethod
    def get_tokens(self) -> list[Token]:
        """Get the tokens"""

    def create_token(self, device_name: str) -> Token:
        """Create new token"""
        unique_name = self._make_unique_device_name(device_name)
        new_token = Token.generate(unique_name)

        self._store_token(new_token)

        return new_token

    @abstractmethod
    def delete_token(self, input_token: Token) -> None:
        """Delete the token"""

    def refresh_token(self, input_token: Token) -> Token:
        """Change the token field of the existing token"""
        new_token = Token.generate(device_name=input_token.device_name)
        new_token.created_at = input_token.created_at

        if input_token in self.get_tokens():
            self.delete_token(input_token)
            self._store_token(new_token)
            return new_token

        raise TokenNotFound("Token not found!")

    def is_token_valid(self, token_string: str) -> bool:
        """Check if the token is valid"""
        return token_string in [token.token for token in self.get_tokens()]

    def is_token_name_exists(self, token_name: str) -> bool:
        """Check if the token name exists"""
        return token_name in [token.device_name for token in self.get_tokens()]

    def is_token_name_pair_valid(self, token_name: str, token_string: str) -> bool:
        """Check if the token name and token are valid"""
        try:
            token = self.get_token_by_name(token_name)
            if token is None:
                return False
        except TokenNotFound:
            return False
        return token.token == token_string

    @abstractmethod
    def get_recovery_key(self) -> Optional[RecoveryKey]:
        """Get the recovery key"""

    def create_recovery_key(
        self,
        expiration: Optional[datetime],
        uses_left: Optional[int],
    ) -> RecoveryKey:
        """Create the recovery key"""
        recovery_key = RecoveryKey.generate(expiration, uses_left)
        self._store_recovery_key(recovery_key)
        return recovery_key

    def use_mnemonic_recovery_key(
        self, mnemonic_phrase: str, device_name: str
    ) -> Token:
        """Use the mnemonic recovery key and create a new token with the given name"""
        if not self.is_recovery_key_valid():
            raise RecoveryKeyNotFound("Recovery key not found")

        recovery_key = self.get_recovery_key()

        if recovery_key is None:
            raise RecoveryKeyNotFound("Recovery key not found")

        recovery_hex_key = recovery_key.key
        if not self._assert_mnemonic(recovery_hex_key, mnemonic_phrase):
            raise RecoveryKeyNotFound("Recovery key not found")

        new_token = self.create_token(device_name=device_name)

        self._decrement_recovery_token()

        return new_token

    def is_recovery_key_valid(self) -> bool:
        """Check if the recovery key is valid"""
        recovery_key = self.get_recovery_key()
        if recovery_key is None:
            return False
        return recovery_key.is_valid()

    @abstractmethod
    def _store_recovery_key(self, recovery_key: RecoveryKey) -> None:
        """Store recovery key directly"""

    @abstractmethod
    def _delete_recovery_key(self) -> None:
        """Delete the recovery key"""

    def get_new_device_key(self) -> NewDeviceKey:
        """Creates and returns the new device key"""
        new_device_key = NewDeviceKey.generate()
        self._store_new_device_key(new_device_key)

        return new_device_key

    def _store_new_device_key(self, new_device_key: NewDeviceKey) -> None:
        """Store new device key directly"""

    @abstractmethod
    def delete_new_device_key(self) -> None:
        """Delete the new device key"""

    def use_mnemonic_new_device_key(
        self, mnemonic_phrase: str, device_name: str
    ) -> Token:
        """Use the mnemonic new device key"""
        new_device_key = self._get_stored_new_device_key()
        if not new_device_key:
            raise NewDeviceKeyNotFound

        if not new_device_key.is_valid():
            raise NewDeviceKeyNotFound

        if not self._assert_mnemonic(new_device_key.key, mnemonic_phrase):
            raise NewDeviceKeyNotFound("Phrase is not token!")

        new_token = self.create_token(device_name=device_name)
        self.delete_new_device_key()

        return new_token

    def reset(self):
        for token in self.get_tokens():
            self.delete_token(token)
        self.delete_new_device_key()
        self._delete_recovery_key()

    def clone(self, source: AbstractTokensRepository) -> None:
        """Clone the state of another repository to this one"""
        self.reset()
        for token in source.get_tokens():
            self._store_token(token)

        recovery_key = source.get_recovery_key()
        if recovery_key is not None:
            self._store_recovery_key(recovery_key)

        new_device_key = source._get_stored_new_device_key()
        if new_device_key is not None:
            self._store_new_device_key(new_device_key)

    @abstractmethod
    def _store_token(self, new_token: Token):
        """Store a token directly"""

    @abstractmethod
    def _decrement_recovery_token(self):
        """Decrement recovery key use count by one"""

    @abstractmethod
    def _get_stored_new_device_key(self) -> Optional[NewDeviceKey]:
        """Retrieves new device key that is already stored."""

    def _make_unique_device_name(self, name: str) -> str:
        """Token name must be an alphanumeric string and not empty.
        Replace invalid characters with '_'
        If name exists, add a random number to the end of the name until it is unique.
        """
        if not re.match("^[a-zA-Z0-9]*$", name):
            name = re.sub("[^a-zA-Z0-9]", "_", name)
        if name == "":
            name = "Unknown device"
        while self.is_token_name_exists(name):
            name += str(randbelow(10))
        return name

    # TODO: find a proper place for it
    def _assert_mnemonic(self, hex_key: str, mnemonic_phrase: str):
        """Return true if hex string matches the phrase, false otherwise
        Raise an InvalidMnemonic error if not mnemonic"""
        recovery_token = bytes.fromhex(hex_key)
        if not Mnemonic(language="english").check(mnemonic_phrase):
            raise InvalidMnemonic("Phrase is not mnemonic!")

        phrase_bytes = Mnemonic(language="english").to_entropy(mnemonic_phrase)
        return phrase_bytes == recovery_token
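To make the contract concrete, here is a hypothetical in-memory backend filling in only the abstract hooks; it is a test double for illustration, not part of this diff:

from typing import Optional

from selfprivacy_api.models.tokens.token import Token
from selfprivacy_api.models.tokens.recovery_key import RecoveryKey
from selfprivacy_api.models.tokens.new_device_key import NewDeviceKey
from selfprivacy_api.repositories.tokens.abstract_tokens_repository import (
    AbstractTokensRepository,
)


class MemoryTokensRepository(AbstractTokensRepository):
    """Hypothetical in-memory backend; the inherited logic stays untouched."""

    def __init__(self):
        self._tokens: list[Token] = []
        self._recovery_key: Optional[RecoveryKey] = None
        self._new_device_key: Optional[NewDeviceKey] = None

    def get_tokens(self) -> list[Token]:
        return list(self._tokens)

    def delete_token(self, input_token: Token) -> None:
        self._tokens.remove(input_token)

    def get_recovery_key(self) -> Optional[RecoveryKey]:
        return self._recovery_key

    def _store_recovery_key(self, recovery_key: RecoveryKey) -> None:
        self._recovery_key = recovery_key

    def _delete_recovery_key(self) -> None:
        self._recovery_key = None

    def delete_new_device_key(self) -> None:
        self._new_device_key = None

    def _store_new_device_key(self, new_device_key: NewDeviceKey) -> None:
        self._new_device_key = new_device_key

    def _get_stored_new_device_key(self) -> Optional[NewDeviceKey]:
        return self._new_device_key

    def _store_token(self, new_token: Token):
        self._tokens.append(new_token)

    def _decrement_recovery_token(self):
        key = self._recovery_key
        if key is not None and key.uses_left is not None:
            key.uses_left -= 1

With those hooks in place, the inherited create_token, refresh_token, and both mnemonic flows all work against the in-memory state.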
@ -0,0 +1,14 @@
class TokenNotFound(Exception):
    """Token not found!"""


class RecoveryKeyNotFound(Exception):
    """Recovery key not found!"""


class InvalidMnemonic(Exception):
    """Phrase is not mnemonic!"""


class NewDeviceKeyNotFound(Exception):
    """New device key not found!"""
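Callers are expected to branch on these exception types rather than parse messages. A sketch, where repo stands for any AbstractTokensRepository instance:

from selfprivacy_api.repositories.tokens.exceptions import TokenNotFound

try:
    token = repo.get_token_by_name("unknown-device")
except TokenNotFound:
    token = None  # device was never registered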
Some files were not shown because too many files have changed in this diff.