diff --git a/SETUP.md b/SETUP.md index bf8f774..86f5daf 100644 --- a/SETUP.md +++ b/SETUP.md @@ -20,7 +20,7 @@ Log into your server and download the Satellite files. This guide assumes that y ``` mkdir ~/satellite cd ~/satellite -wget -q https://github.com/mike76-dev/satellite/releases/download/v0.10.0/satellite_linux_amd64.zip +wget -q https://github.com/mike76-dev/sia-satellite/releases/download/v0.10.0/satellite_linux_amd64.zip unzip satellite_linux_amd64.zip rm satellite_linux_amd64.zip ``` diff --git a/go.mod b/go.mod index 5ffc0e2..1d13b57 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/mike76-dev/sia-satellite -go 1.21 +go 1.21.6 + +toolchain go1.21.7 require ( github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf @@ -12,16 +14,23 @@ require ( github.com/rs/xid v1.5.0 github.com/spf13/cobra v1.7.0 github.com/stripe/stripe-go/v75 v75.10.0 - gitlab.com/NebulousLabs/go-upnp v0.0.0-20211002182029-11da932010b6 - gitlab.com/NebulousLabs/log v0.0.0-20210609172545-77f6775350e2 gitlab.com/NebulousLabs/merkletree v0.0.0-20200118113624-07fbf710afc4 - go.sia.tech/core v0.1.12-0.20230623163824-788ed8e10817 + go.sia.tech/core v0.2.3-0.20240416172826-f9d44a4149e1 + go.sia.tech/coreutils v0.0.4-0.20240417205447-a3dce82e35e3 + go.sia.tech/jape v0.11.1 go.sia.tech/mux v1.2.0 - golang.org/x/crypto v0.15.0 - golang.org/x/term v0.14.0 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.22.0 + golang.org/x/term v0.19.0 lukechampine.com/frand v1.4.2 ) +require ( + go.etcd.io/bbolt v1.3.9 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/tools v0.6.0 // indirect +) + require ( github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect github.com/golang-jwt/jwt/v5 v5.2.0 @@ -29,8 +38,5 @@ require ( github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/spf13/pflag v1.0.5 // indirect gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 // indirect - gitlab.com/NebulousLabs/fastrand 
v0.0.0-20181126182046-603482d69e40 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/sys v0.14.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.19.0 // indirect ) diff --git a/go.sum b/go.sum index 35a459d..bd5bb06 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,9 @@ github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY= github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf h1:K5VXW9LjmJv/xhjvQcNWTdk4WOSyreil6YaubuCPeRY= github.com/dchest/threefish v0.0.0-20120919164726-3ecf4c494abf/go.mod h1:bXVurdTuvOiJu7NHALemFe0JMvC2UmwYHW+7fcZaZ2M= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= @@ -31,8 +32,9 @@ github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRM github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
github.com/stripe/stripe-go/v75 v75.10.0 h1:Sj/gGshIMQMgCQ3K92+RUEoXsAi4tMABuLbsDBs1ULg= github.com/stripe/stripe-go/v75 v75.10.0/go.mod h1:wT44gah+eCY8Z0aSpY/vQlYYbicU9uUAbAqdaUxxDqE= gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= @@ -40,52 +42,54 @@ gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bF gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= -gitlab.com/NebulousLabs/go-upnp v0.0.0-20211002182029-11da932010b6 h1:WKij6HF8ECp9E7K0E44dew9NrRDGiNR5u4EFsXnJUx4= -gitlab.com/NebulousLabs/go-upnp v0.0.0-20211002182029-11da932010b6/go.mod h1:vhrHTGDh4YR7wK8Z+kRJ+x8SF/6RUM3Vb64Si5FD0L8= -gitlab.com/NebulousLabs/log v0.0.0-20210609172545-77f6775350e2 h1:ovh05+n1jw7R9KT3qa5kdK4T26fIKyVogws06goZ5+Y= -gitlab.com/NebulousLabs/log v0.0.0-20210609172545-77f6775350e2/go.mod h1:qOhJbQ7Vzw+F+RCVmpPZ7WAwBIM9PZv4tWKp6Kgd9CY= gitlab.com/NebulousLabs/merkletree v0.0.0-20200118113624-07fbf710afc4 h1:iuNdBfBg0umjOvrEf9MxGzK+NwAyE2oCZjDqUx9zVFs= gitlab.com/NebulousLabs/merkletree v0.0.0-20200118113624-07fbf710afc4/go.mod h1:0cjDwhA+Pv9ZQXHED7HUSS3sCvo2zgsoaMgE7MeGBWo= -go.sia.tech/core v0.1.12-0.20230623163824-788ed8e10817 h1:2cAVfksJnxQpYIZK8MRbdmOEZEc87HbA4aq2fuIToy4= -go.sia.tech/core v0.1.12-0.20230623163824-788ed8e10817/go.mod h1:D17UWSn99SEfQnEaR9G9n6Kz9+BwqMoUgZ6Cl424LsQ= +go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= +go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.sia.tech/core v0.2.3-0.20240416172826-f9d44a4149e1 h1:tG9JJk6qPevT2CrFttL9Y4ZIT5+RS3J+Hk9E3zJaGiY= +go.sia.tech/core 
v0.2.3-0.20240416172826-f9d44a4149e1/go.mod h1:24liZWimivGQF+h3d14ly9oEpMIYxHPSgEMKmunxxi0= +go.sia.tech/coreutils v0.0.4-0.20240417205447-a3dce82e35e3 h1:GfPdg+kQqqUpMUp6nLocJ8lgBtuOJnKMfX64ax5jOJk= +go.sia.tech/coreutils v0.0.4-0.20240417205447-a3dce82e35e3/go.mod h1:6e0bDPG1qQa6jrkQhgY925lGHmTNbtq8nQwAmfpWhmo= +go.sia.tech/jape v0.11.1 h1:M7IP+byXL7xOqzxcHUQuXW+q3sYMkYzmMlMw+q8ZZw0= +go.sia.tech/jape v0.11.1/go.mod h1:4QqmBB+t3W7cNplXPj++ZqpoUb2PeiS66RLpXmEGap4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= go.sia.tech/mux v1.2.0/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= 
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git 
a/init.sql b/init.sql index a5d8ffc..7623c3c 100644 --- a/init.sql +++ b/init.sql @@ -1,270 +1,65 @@ -/* gateway */ - -DROP TABLE IF EXISTS gw_nodes; -DROP TABLE IF EXISTS gw_url; -DROP TABLE IF EXISTS gw_blocklist; - -CREATE TABLE gw_nodes ( - address VARCHAR(255) NOT NULL, - outbound BOOL, - PRIMARY KEY (address) -); - -CREATE TABLE gw_url ( - router_url VARCHAR(255) NOT NULL -); - -INSERT INTO gw_url (router_url) VALUES (''); - -CREATE TABLE gw_blocklist ( - ip VARCHAR(255) NOT NULL, - PRIMARY KEY (ip) -); - -/* consensus */ - -DROP TABLE IF EXISTS cs_height; -DROP TABLE IF EXISTS cs_consistency; -DROP TABLE IF EXISTS cs_sfpool; -DROP TABLE IF EXISTS cs_changelog; -DROP TABLE IF EXISTS cs_dsco; -DROP TABLE IF EXISTS cs_fcex; -DROP TABLE IF EXISTS cs_oak; -DROP TABLE IF EXISTS cs_oak_init; -DROP TABLE IF EXISTS cs_sco; -DROP TABLE IF EXISTS cs_fc; -DROP TABLE IF EXISTS cs_sfo; -DROP TABLE IF EXISTS cs_fuh; -DROP TABLE IF EXISTS cs_fuh_current; -DROP TABLE IF EXISTS cs_map; -DROP TABLE IF EXISTS cs_path; -DROP TABLE IF EXISTS cs_cl; -DROP TABLE IF EXISTS cs_dos; - -CREATE TABLE cs_height ( - id INT NOT NULL AUTO_INCREMENT, - height BIGINT UNSIGNED NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE cs_consistency ( - id INT NOT NULL AUTO_INCREMENT, - inconsistency BOOL NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE cs_sfpool ( - id INT NOT NULL AUTO_INCREMENT, - bytes BLOB NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE cs_changelog ( - id INT NOT NULL AUTO_INCREMENT, - bytes BINARY(32) NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE cs_dsco ( - height BIGINT UNSIGNED NOT NULL, - scoid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (scoid ASC) -); - -CREATE TABLE cs_fcex ( - height BIGINT UNSIGNED NOT NULL, - fcid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (fcid ASC) -); - -CREATE TABLE cs_oak ( - bid BINARY(32) NOT NULL UNIQUE, - bytes BINARY(40) NOT NULL, - PRIMARY KEY (bid ASC) -); - -CREATE TABLE cs_oak_init ( - id INT NOT NULL 
AUTO_INCREMENT, - init BOOL NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE cs_sco ( - scoid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (scoid ASC) -); - -CREATE TABLE cs_fc ( - fcid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (fcid ASC) -); - -CREATE TABLE cs_sfo ( - sfoid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (sfoid ASC) -); - -CREATE TABLE cs_fuh ( - height BIGINT UNSIGNED NOT NULL, - bytes BINARY(64) NOT NULL, - PRIMARY KEY (height ASC) -); - -CREATE TABLE cs_fuh_current ( - id INT NOT NULL AUTO_INCREMENT, - bytes BINARY(64) NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE cs_path ( - height BIGINT UNSIGNED NOT NULL, - bid BINARY(32) NOT NULL, - PRIMARY KEY (height ASC) -); - -CREATE TABLE cs_map ( - id INT NOT NULL AUTO_INCREMENT, - bid BINARY(32) NOT NULL UNIQUE, - bytes LONGBLOB NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE cs_cl ( - ceid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (ceid ASC) -); - -CREATE TABLE cs_dos ( - bid BINARY(32) NOT NULL, - PRIMARY KEY (bid ASC) -); - -/* transactionpool */ - -DROP TABLE IF EXISTS tp_height; -DROP TABLE IF EXISTS tp_ctx; -DROP TABLE IF EXISTS tp_median; -DROP TABLE IF EXISTS tp_cc; -DROP TABLE IF EXISTS tp_recent; - -CREATE TABLE tp_height ( - id INT NOT NULL AUTO_INCREMENT, - height BIGINT UNSIGNED NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE tp_ctx ( - txid BINARY(32) NOT NULL, - PRIMARY KEY (txid), - INDEX txid (txid ASC) -); - -CREATE TABLE tp_median ( - id INT NOT NULL AUTO_INCREMENT, - bytes BLOB NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE tp_cc ( - id INT NOT NULL AUTO_INCREMENT, - ceid BINARY(32) NOT NULL, - PRIMARY KEY (id) -); - -CREATE TABLE tp_recent ( - id INT NOT NULL AUTO_INCREMENT, - bid BINARY(32) NOT NULL, - PRIMARY KEY (id) -); - /* wallet */ -DROP TABLE IF EXISTS wt_addr; -DROP TABLE IF EXISTS wt_txn; -DROP TABLE IF EXISTS wt_sco; -DROP TABLE IF EXISTS wt_sfo; -DROP TABLE IF EXISTS wt_spo; -DROP TABLE IF EXISTS wt_uc; 
+DROP TABLE IF EXISTS wt_sces; +DROP TABLE IF EXISTS wt_sfes; +DROP TABLE IF EXISTS wt_watched; +DROP TABLE IF EXISTS wt_addresses; +DROP TABLE IF EXISTS wt_tip; DROP TABLE IF EXISTS wt_info; -DROP TABLE IF EXISTS wt_watch; -DROP TABLE IF EXISTS wt_aux; -DROP TABLE IF EXISTS wt_keys; +DROP TABLE IF EXISTS wt_spent; -CREATE TABLE wt_txn ( - id INT NOT NULL AUTO_INCREMENT, - txid BINARY(32) NOT NULL UNIQUE, - bytes BLOB NOT NULL, +CREATE TABLE wt_addresses ( + id BIGINT NOT NULL AUTO_INCREMENT, + addr BINARY(32) NOT NULL UNIQUE, PRIMARY KEY (id) ); -CREATE TABLE wt_addr ( - id INT NOT NULL AUTO_INCREMENT, - addr BINARY(32) NOT NULL, - txid BINARY(32) NOT NULL, +CREATE TABLE wt_sces ( + id BIGINT NOT NULL AUTO_INCREMENT, + scoid BINARY(32) NOT NULL UNIQUE, + sc_value BLOB NOT NULL, + merkle_proof BLOB NOT NULL, + leaf_index BIGINT UNSIGNED NOT NULL, + maturity_height BIGINT UNSIGNED NOT NULL, + address_id BIGINT NOT NULL, PRIMARY KEY (id), - FOREIGN KEY (txid) REFERENCES wt_txn(txid) -); - -CREATE TABLE wt_sco ( - scoid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (scoid ASC) -); - -CREATE TABLE wt_sfo ( - sfoid BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (sfoid ASC) + FOREIGN KEY (address_id) REFERENCES wt_addresses(id) ); -CREATE TABLE wt_spo ( - oid BINARY(32) NOT NULL, - height BIGINT UNSIGNED NOT NULL, - PRIMARY KEY (oid ASC) +CREATE TABLE wt_sfes ( + id BIGINT NOT NULL AUTO_INCREMENT, + sfoid BINARY(32) NOT NULL UNIQUE, + claim_start BLOB NOT NULL, + merkle_proof BLOB NOT NULL, + leaf_index BIGINT UNSIGNED NOT NULL, + sf_value BIGINT UNSIGNED NOT NULL, + address_id BIGINT NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY (address_id) REFERENCES wt_addresses(id) ); -CREATE TABLE wt_uc ( - addr BINARY(32) NOT NULL, - bytes BLOB NOT NULL, - PRIMARY KEY (addr ASC) +CREATE TABLE wt_watched ( + address_id BIGINT NOT NULL UNIQUE, + FOREIGN KEY (address_id) REFERENCES wt_addresses(id) ); -CREATE TABLE wt_info ( +CREATE TABLE wt_tip ( id INT NOT NULL 
AUTO_INCREMENT, - cc BINARY(32) NOT NULL, height BIGINT UNSIGNED NOT NULL, - encrypted BLOB NOT NULL, - sfpool BLOB NOT NULL, - salt BINARY(32) NOT NULL, - progress BIGINT UNSIGNED NOT NULL, - seed BLOB NOT NULL, - pwd BLOB NOT NULL, + bid BINARY(32) NOT NULL, PRIMARY KEY (id) ); -CREATE TABLE wt_aux ( - salt BINARY(32) NOT NULL, - encrypted BLOB NOT NULL, - seed BLOB NOT NULL, - PRIMARY KEY (seed(32)) -); - -CREATE TABLE wt_keys ( - salt BINARY(32) NOT NULL, - encrypted BLOB NOT NULL, - skey BLOB NOT NULL, - PRIMARY KEY (skey(32)) +CREATE TABLE wt_info ( + id INT NOT NULL AUTO_INCREMENT, + seed BINARY(16) NOT NULL, + progress BIGINT UNSIGNED NOT NULL, + PRIMARY KEY (id) ); -CREATE TABLE wt_watch ( - addr BINARY(32) NOT NULL, - PRIMARY KEY (addr ASC) +CREATE TABLE wt_spent ( + id BINARY(32) NOT NULL, + PRIMARY KEY (id) ); /* provider */ @@ -470,9 +265,9 @@ CREATE TABLE hdb_contracts ( CREATE TABLE hdb_info ( id INT NOT NULL AUTO_INCREMENT, height BIGINT UNSIGNED NOT NULL, + bid BINARY(32) NOT NULL, scan_complete BOOL NOT NULL, disable_ip_check BOOL NOT NULL, - last_change BINARY(32) NOT NULL, filter_mode INT NOT NULL, PRIMARY KEY (id) ); @@ -522,7 +317,7 @@ CREATE TABLE ctr_contracts ( CREATE TABLE ctr_info ( id INT NOT NULL AUTO_INCREMENT, height BIGINT UNSIGNED NOT NULL, - last_change BINARY(32) NOT NULL, + bid BINARY(32) NOT NULL, synced BOOL NOT NULL, PRIMARY KEY (id) ); diff --git a/modules/alert.go b/modules/alert.go deleted file mode 100644 index ca21937..0000000 --- a/modules/alert.go +++ /dev/null @@ -1,215 +0,0 @@ -package modules - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "sync" -) - -// The following consts are the different types of severity levels available in -// the alert system. -const ( - // SeverityUnknown is the value of an uninitialized severity and should never - // be used. - SeverityUnknown = iota - // SeverityInfo shows the user potentially useful information, such as the - // status of long running actions. 
- SeverityInfo - // SeverityWarning warns the user about potential issues which might require - // preventive actions. - SeverityWarning - // SeverityError should be used for information about the system where - // immediate action is recommended to avoid further issues like loss of data. - SeverityError - // SeverityCritical should be used for critical errors. e.g. a lack of funds - // causing data to get lost without immediate action. - SeverityCritical -) - -// The following consts are a list of AlertIDs. All IDs used throughout Satellite -// should be unique and listed here. -const ( - // alertIDUnknown is the id of an unknown alert. - alertIDUnknown = "unknown" - // AlertIDWalletLockedDuringMaintenance is the id of the alert that is - // registered if the wallet is locked during a contract renewal or formation. - AlertIDWalletLockedDuringMaintenance = "wallet-locked" - // AlertIDRenterAllowanceLowFunds is the id of the alert that is registered - // if at least one contract failed to renew/form due to low allowance. - AlertIDRenterAllowanceLowFunds = "low-funds" - // AlertIDRenterContractRenewalError is the id of the alert that is - // registered if at least once contract renewal or refresh failed. - AlertIDRenterContractRenewalError = "contract-renewal-error" - // AlertIDGatewayOffline is the id of the alert that is registered upon a - // call to 'gateway.Offline' if the value returned is 'false' and - // unregistered when it returns 'true'. - AlertIDGatewayOffline = "gateway-offline" -) - -type ( - // Alerter is the interface implemented by all top-level modules. It's an - // interface that allows for asking a module about potential issues. - Alerter interface { - Alerts() (crit, err, warn, info []Alert) - } - - // Alert is a type that contains essential information about an alert. - Alert struct { - // Cause is the cause for the Alert. - // e.g. "Wallet is locked" - Cause string `json:"cause"` - // Msg is the message the Alert is meant to convey to the user. 
- // e.g. "Contractor can't form new contracts". - Msg string `json:"msg"` - // Module contains information about what module the alert originated from. - Module string `json:"module"` - // Severity categorizes the Alerts to allow for an easy way to filter them. - Severity AlertSeverity `json:"severity"` - } - - // AlertID is a helper type for an Alert's ID. - AlertID string - - // AlertSeverity describes the severity of an alert. - AlertSeverity uint64 -) - -// Equals returns true if x and y are identical alerts. -func (x Alert) Equals(y Alert) bool { - return x.Module == y.Module && x.Cause == y.Cause && x.Msg == y.Msg && x.Severity == y.Severity -} - -// EqualsWithErrorCause returns true if x and y have the same module, message, -// and severity and if the provided error is in both of the alert's causes. -func (x Alert) EqualsWithErrorCause(y Alert, causeErr string) bool { - firstCheck := x.Module == y.Module && x.Msg == y.Msg && x.Severity == y.Severity - causeCheck := strings.Contains(x.Cause, causeErr) && strings.Contains(y.Cause, causeErr) - return firstCheck && causeCheck -} - -// MarshalJSON defines a JSON encoding for the AlertSeverity. -func (a AlertSeverity) MarshalJSON() ([]byte, error) { - switch a { - case SeverityInfo: - case SeverityWarning: - case SeverityError: - case SeverityCritical: - default: - return nil, errors.New("unknown AlertSeverity") - } - return json.Marshal(a.String()) -} - -// UnmarshalJSON attempts to decode an AlertSeverity. 
-func (a *AlertSeverity) UnmarshalJSON(b []byte) error { - var severityStr string - if err := json.Unmarshal(b, &severityStr); err != nil { - return err - } - switch severityStr { - case "info": - *a = SeverityInfo - case "warning": - *a = SeverityWarning - case "error": - *a = SeverityError - case "critical": - *a = SeverityCritical - default: - return fmt.Errorf("unknown severity '%v'", severityStr) - } - return nil -} - -// String converts an alertSeverity to a string -func (a AlertSeverity) String() string { - switch a { - case SeverityInfo: - return "info" - case SeverityWarning: - return "warning" - case SeverityError: - return "error" - case SeverityCritical: - return "critical" - case SeverityUnknown: - default: - } - return "unknown" -} - -// GenericAlerter implements the Alerter interface. It can be used as a helper -// type to implement the Alerter interface for modules and submodules. -type ( - GenericAlerter struct { - alerts map[AlertID]Alert - module string - mu sync.Mutex - } -) - -// NewAlerter creates a new alerter. -func NewAlerter(module string) *GenericAlerter { - a := &GenericAlerter{ - alerts: make(map[AlertID]Alert), - module: module, - } - return a -} - -// Alerts returns the current alerts tracked by the alerter. -func (a *GenericAlerter) Alerts() (crit, err, warn, info []Alert) { - a.mu.Lock() - defer a.mu.Unlock() - for _, alert := range a.alerts { - switch alert.Severity { - case SeverityInfo: - info = append(info, alert) - case SeverityCritical: - crit = append(crit, alert) - case SeverityError: - err = append(err, alert) - case SeverityWarning: - warn = append(warn, alert) - default: - fmt.Println("CRITICAL: Alerts: invalid severity", alert.Severity) - } - } - return -} - -// RegisterAlert adds an alert to the alerter. 
-func (a *GenericAlerter) RegisterAlert(id AlertID, msg, cause string, severity AlertSeverity) { - a.mu.Lock() - defer a.mu.Unlock() - a.alerts[id] = Alert{ - Cause: cause, - Module: a.module, - Msg: msg, - Severity: severity, - } -} - -// UnregisterAlert removes an alert from the alerter by id. -func (a *GenericAlerter) UnregisterAlert(id AlertID) { - a.mu.Lock() - defer a.mu.Unlock() - delete(a.alerts, id) -} - -// PrintAlerts is a helper function to print details of a slice of alerts -// with given severity description to command line. -func PrintAlerts(alerts []Alert, as AlertSeverity) { - fmt.Printf("\n There are %v %s alerts\n", len(alerts), as.String()) - for _, a := range alerts { - fmt.Printf(` ------------------- - Module: %s - Severity: %s - Message: %s - Cause: %s`, a.Module, a.Severity.String(), a.Msg, a.Cause) - } - fmt.Printf("\n------------------\n\n") -} diff --git a/modules/announce.go b/modules/announce.go index f869e3f..eeae6a6 100644 --- a/modules/announce.go +++ b/modules/announce.go @@ -87,3 +87,20 @@ func DecodeAnnouncement(fullAnnouncement []byte) (na NetAddress, pk types.Public return ha.NetAddress, pk, nil } + +// DecodeV2Announcement verifies a V2 host announcement against the signature. 
+func DecodeV2Announcement(at types.Attestation) (na string, pk types.PublicKey, err error) { + if at.Key != "HostAnnouncement" { + return "", types.PublicKey{}, errAnnNotAnnouncement + } + + pk = at.PublicKey + h := types.NewHasher() + at.EncodeTo(h.E) + annHash := h.Sum() + if ok := pk.VerifyHash(annHash, at.Signature); !ok { + return "", types.PublicKey{}, errAnnUnrecognizedSignature + } + + return string(at.Value), pk, nil +} diff --git a/modules/consensus.go b/modules/consensus.go deleted file mode 100644 index 35e9c2d..0000000 --- a/modules/consensus.go +++ /dev/null @@ -1,573 +0,0 @@ -package modules - -import ( - "encoding/binary" - "errors" - "math/big" - "time" - - "go.sia.tech/core/types" -) - -const ( - // ConsensusDir is the name of the directory used for all of the consensus - // persistence files. - ConsensusDir = "consensus" - - // DiffApply indicates that a diff is being applied to the consensus set. - DiffApply DiffDirection = true - - // DiffRevert indicates that a diff is being reverted from the consensus - // set. - DiffRevert DiffDirection = false -) - -var ( - // ConsensusChangeBeginning is a special consensus change id that tells the - // consensus set to provide all consensus changes starting from the very - // first diff, which includes the genesis block diff. - ConsensusChangeBeginning = ConsensusChangeID{} - - // ConsensusChangeRecent is a special consensus change id that tells the - // consensus set to provide the most recent consensus change, instead of - // starting from a specific value (which may not be known to the caller). - ConsensusChangeRecent = ConsensusChangeID{1} - - // ErrBlockKnown is an error indicating that a block is already in the - // database. - ErrBlockKnown = errors.New("block already present in database") - - // ErrBlockUnsolved indicates that a block did not meet the required POW - // target. 
- ErrBlockUnsolved = errors.New("block does not meet target") - - // ErrInvalidConsensusChangeID indicates that ConsensusSetPersistSubscribe - // was called with a consensus change id that is not recognized. Most - // commonly, this means that the consensus set was deleted or replaced and - // now the module attempting the subscription has desynchronized. This error - // should be handled by the module, and not reported to the user. - ErrInvalidConsensusChangeID = errors.New("consensus subscription has invalid id - files are inconsistent") - - // ErrNonExtendingBlock indicates that a block is valid but does not result - // in a fork that is the heaviest known fork - the consensus set has not - // changed as a result of seeing the block. - ErrNonExtendingBlock = errors.New("block does not extend the longest fork") -) - -type ( - // ConsensusChangeID is the id of a consensus change. - ConsensusChangeID types.Hash256 - - // A DiffDirection indicates the "direction" of a diff, either applied or - // reverted. A bool is used to restrict the value to these two possibilities. - DiffDirection bool - - // A ConsensusSetSubscriber is an object that receives updates to the consensus - // set every time there is a change in consensus. - ConsensusSetSubscriber interface { - // ProcessConsensusChange sends a consensus update to a module through - // a function call. Updates will always be sent in the correct order. - // There may not be any reverted blocks, but there will always be - // applied blocks. - ProcessConsensusChange(ConsensusChange) - } - - // ConsensusChangeDiffs is a collection of diffs caused by a single block. - // If the block was reverted, the individual diff directions are inverted. - // For example, a block that spends an output and creates a miner payout - // would have one SiacoinOutputDiff with direction DiffRevert and one - // DelayedSiacoinOutputDiff with direction DiffApply. 
If the same block - // were reverted, the SCOD would have direction DiffApply and the DSCOD - // would have direction DiffRevert. - ConsensusChangeDiffs struct { - SiacoinOutputDiffs []SiacoinOutputDiff - FileContractDiffs []FileContractDiff - SiafundOutputDiffs []SiafundOutputDiff - DelayedSiacoinOutputDiffs []DelayedSiacoinOutputDiff - SiafundPoolDiffs []SiafundPoolDiff - } - - // A ConsensusChange enumerates a set of changes that occurred to the consensus set. - ConsensusChange struct { - // ID is a unique id for the consensus change derived from the reverted - // and applied blocks. - ID ConsensusChangeID - - // BlockHeight is the height of the chain after all blocks included in - // this change have been reverted and applied. - BlockHeight uint64 - - // RevertedBlocks is the list of blocks that were reverted by the change. - // The reverted blocks were always all reverted before the applied blocks - // were applied. The revered blocks are presented in the order that they - // were reverted. - RevertedBlocks []types.Block - - // AppliedBlocks is the list of blocks that were applied by the change. The - // applied blocks are always all applied after all the reverted blocks were - // reverted. The applied blocks are presented in the order that they were - // applied. - AppliedBlocks []types.Block - - // RevertedDiffs is the set of diffs caused by reverted blocks. Each - // element corresponds to a block in RevertedBlocks. - RevertedDiffs []ConsensusChangeDiffs - - // AppliedDiffs is the set of diffs caused by applied blocks. Each - // element corresponds to a block in AppliedBlocks. - AppliedDiffs []ConsensusChangeDiffs - - // ConsensusChangeDiffs is the concatenation of all RevertedDiffs and - // AppliedDiffs. - ConsensusChangeDiffs - - // ChildTarget defines the target of any block that would be the child - // of the block most recently appended to the consensus set. 
- ChildTarget Target - - // MinimumValidChildTimestamp defines the minimum allowed timestamp for - // any block that is the child of the block most recently appended to - // the consensus set. - MinimumValidChildTimestamp time.Time - - // Synced indicates whether or not the ConsensusSet is synced with its - // peers. - Synced bool - - // TryTransactionSet is an unlocked version of - // ConsensusSet.TryTransactionSet. This allows the TryTransactionSet - // function to be called by a subscriber during - // ProcessConsensusChange. - TryTransactionSet func([]types.Transaction) (ConsensusChange, error) - } - - // A SiacoinOutputDiff indicates the addition or removal of a SiacoinOutput in - // the consensus set. - SiacoinOutputDiff struct { - Direction DiffDirection - ID types.SiacoinOutputID - SiacoinOutput types.SiacoinOutput - } - - // A FileContractDiff indicates the addition or removal of a FileContract in - // the consensus set. - FileContractDiff struct { - Direction DiffDirection - ID types.FileContractID - FileContract types.FileContract - } - - // A SiafundOutputDiff indicates the addition or removal of a SiafundOutput in - // the consensus set. - SiafundOutputDiff struct { - Direction DiffDirection - ID types.SiafundOutputID - SiafundOutput types.SiafundOutput - ClaimStart types.Currency - } - - // A DelayedSiacoinOutputDiff indicates the introduction of a siacoin output - // that cannot be spent until after maturing for 144 blocks. When the output - // has matured, a SiacoinOutputDiff will be provided. - DelayedSiacoinOutputDiff struct { - Direction DiffDirection - ID types.SiacoinOutputID - SiacoinOutput types.SiacoinOutput - MaturityHeight uint64 - } - - // A SiafundPoolDiff contains the value of the siafundPool before the block - // was applied, and after the block was applied. When applying the diff, set - // siafundPool to 'Adjusted'. When reverting the diff, set siafundPool to - // 'Previous'. 
- SiafundPoolDiff struct { - Direction DiffDirection - Previous types.Currency - Adjusted types.Currency - } - - // A ConsensusSet accepts blocks and builds an understanding of network - // consensus. - ConsensusSet interface { - Alerter - - // AcceptBlock adds a block to consensus. An error will be returned if the - // block is invalid, has been seen before, is an orphan, or doesn't - // contribute to the heaviest fork known to the consensus set. If the block - // does not become the head of the heaviest known fork but is otherwise - // valid, it will be remembered by the consensus set but an error will - // still be returned. - AcceptBlock(types.Block) error - - // BlockAtHeight returns the block found at the input height, with a - // bool to indicate whether that block exists. - BlockAtHeight(uint64) (types.Block, bool) - - // BlocksByID returns a block found for a given ID and its height, with - // a bool to indicate whether that block exists. - BlockByID(types.BlockID) (types.Block, uint64, bool) - - // ChildTarget returns the target required to extend the current heaviest - // fork. This function is typically used by miners looking to extend the - // heaviest fork. - ChildTarget(types.BlockID) (Target, bool) - - // Close will shut down the consensus set, giving the module enough time to - // run any required closing routines. - Close() error - - // ConsensusSetSubscribe adds a subscriber to the list of subscribers - // and gives them every consensus change that has occurred since the - // change with the provided id. There are a few special cases, - // described by the ConsensusChangeX variables in this package. - // A channel can be provided to abort the subscription process. - ConsensusSetSubscribe(ConsensusSetSubscriber, ConsensusChangeID, <-chan struct{}) error - - // CurrentBlock returns the latest block in the heaviest known - // blockchain. - CurrentBlock() types.Block - - // Height returns the current height of consensus. 
- Height() uint64 - - // Synced returns true if the consensus set is synced with the network. - Synced() bool - - // InCurrentPath returns true if the block id presented is found in the - // current path, false otherwise. - InCurrentPath(types.BlockID) bool - - // MinimumValidChildTimestamp returns the earliest timestamp that is - // valid on the current longest fork according to the consensus set. This is - // a required piece of information for the miner, who could otherwise be at - // risk of mining invalid blocks. - MinimumValidChildTimestamp(types.BlockID) (time.Time, bool) - - // StorageProofSegment returns the segment to be used in the storage proof for - // a given file contract. - StorageProofSegment(types.FileContractID) (uint64, error) - - // FoundationUnlockHashes returns the current primary and failsafe - // Foundation UnlockHashes. - FoundationUnlockHashes() (primary, failsafe types.Address) - - // TryTransactionSet checks whether the transaction set would be valid if - // it were added in the next block. A consensus change is returned - // detailing the diffs that would result from the application of the - // transaction. - TryTransactionSet([]types.Transaction) (ConsensusChange, error) - - // Unsubscribe removes a subscriber from the list of subscribers, - // allowing for garbage collection and rescanning. If the subscriber is - // not found in the subscriber database, no action is taken. - Unsubscribe(ConsensusSetSubscriber) - } -) - -// AppendDiffs appends a set of diffs to cc. -func (cc *ConsensusChange) AppendDiffs(diffs ConsensusChangeDiffs) { - cc.SiacoinOutputDiffs = append(cc.SiacoinOutputDiffs, diffs.SiacoinOutputDiffs...) - cc.FileContractDiffs = append(cc.FileContractDiffs, diffs.FileContractDiffs...) - cc.SiafundOutputDiffs = append(cc.SiafundOutputDiffs, diffs.SiafundOutputDiffs...) - cc.DelayedSiacoinOutputDiffs = append(cc.DelayedSiacoinOutputDiffs, diffs.DelayedSiacoinOutputDiffs...) 
- cc.SiafundPoolDiffs = append(cc.SiafundPoolDiffs, diffs.SiafundPoolDiffs...) -} - -// InitialHeight returns the height of the consensus before blocks are applied. -func (cc *ConsensusChange) InitialHeight() uint64 { - if cc.BlockHeight == 0 { - return 0 - } - return cc.BlockHeight - uint64(len(cc.AppliedBlocks)) -} - -// EncodeTo implements types.EncoderTo. -func (cc *ConsensusChange) EncodeTo(e *types.Encoder) { - e.Write(cc.ID[:]) - e.WritePrefix(len(cc.RevertedBlocks)) - for _, rb := range cc.RevertedBlocks { - rb.EncodeTo(e) - } - e.WritePrefix(len(cc.AppliedBlocks)) - for _, ab := range cc.AppliedBlocks { - ab.EncodeTo(e) - } - e.WritePrefix(len(cc.RevertedDiffs)) - for _, rd := range cc.RevertedDiffs { - rd.EncodeTo(e) - } - e.WritePrefix(len(cc.AppliedDiffs)) - for _, ad := range cc.AppliedDiffs { - ad.EncodeTo(e) - } - e.Write(cc.ChildTarget[:]) - e.WriteTime(cc.MinimumValidChildTimestamp) - e.WriteBool(cc.Synced) -} - -// EncodeTo implements types.EncoderTo. -func (ccd *ConsensusChangeDiffs) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(ccd.SiacoinOutputDiffs)) - for _, sod := range ccd.SiacoinOutputDiffs { - sod.EncodeTo(e) - } - e.WritePrefix(len(ccd.FileContractDiffs)) - for _, fcd := range ccd.FileContractDiffs { - fcd.EncodeTo(e) - } - e.WritePrefix(len(ccd.SiafundOutputDiffs)) - for _, sfd := range ccd.SiafundOutputDiffs { - sfd.EncodeTo(e) - } - e.WritePrefix(len(ccd.DelayedSiacoinOutputDiffs)) - for _, dsod := range ccd.DelayedSiacoinOutputDiffs { - dsod.EncodeTo(e) - } - e.WritePrefix(len(ccd.SiafundPoolDiffs)) - for _, spd := range ccd.SiafundPoolDiffs { - spd.EncodeTo(e) - } -} - -// EncodeTo implements types.EncoderTo. -func (sod *SiacoinOutputDiff) EncodeTo(e *types.Encoder) { - e.WriteBool(bool(sod.Direction)) - sod.ID.EncodeTo(e) - sod.SiacoinOutput.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. 
-func (fcd *FileContractDiff) EncodeTo(e *types.Encoder) { - e.WriteBool(bool(fcd.Direction)) - fcd.ID.EncodeTo(e) - fcd.FileContract.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (sfd *SiafundOutputDiff) EncodeTo(e *types.Encoder) { - e.WriteBool(bool(sfd.Direction)) - sfd.ID.EncodeTo(e) - types.NewCurrency64(sfd.SiafundOutput.Value).EncodeTo(e) - sfd.SiafundOutput.Address.EncodeTo(e) - sfd.ClaimStart.EncodeTo(e) -} - -// EncodeTo implements types.EncoderTo. -func (dsod *DelayedSiacoinOutputDiff) EncodeTo(e *types.Encoder) { - e.WriteBool(bool(dsod.Direction)) - dsod.ID.EncodeTo(e) - dsod.SiacoinOutput.EncodeTo(e) - e.WriteUint64(dsod.MaturityHeight) -} - -// EncodeTo implements types.EncoderTo. -func (spd *SiafundPoolDiff) EncodeTo(e *types.Encoder) { - e.WriteBool(bool(spd.Direction)) - spd.Previous.EncodeTo(e) - spd.Adjusted.EncodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom. -func (cc *ConsensusChange) DecodeFrom(d *types.Decoder) { - d.Read(cc.ID[:]) - l := d.ReadPrefix() - cc.RevertedBlocks = make([]types.Block, l) - for i := 0; i < l; i++ { - cc.RevertedBlocks[i].DecodeFrom(d) - } - l = d.ReadPrefix() - cc.AppliedBlocks = make([]types.Block, l) - for i := 0; i < l; i++ { - cc.AppliedBlocks[i].DecodeFrom(d) - } - l = d.ReadPrefix() - cc.RevertedDiffs = make([]ConsensusChangeDiffs, l) - for i := 0; i < l; i++ { - cc.RevertedDiffs[i].DecodeFrom(d) - } - l = d.ReadPrefix() - cc.AppliedDiffs = make([]ConsensusChangeDiffs, l) - for i := 0; i < l; i++ { - cc.AppliedDiffs[i].DecodeFrom(d) - } - d.Read(cc.ChildTarget[:]) - cc.MinimumValidChildTimestamp = d.ReadTime() - cc.Synced = d.ReadBool() -} - -// DecodeFrom implements types.DecoderFrom. 
-func (ccd *ConsensusChangeDiffs) DecodeFrom(d *types.Decoder) { - l := d.ReadPrefix() - ccd.SiacoinOutputDiffs = make([]SiacoinOutputDiff, l) - for i := 0; i < l; i++ { - ccd.SiacoinOutputDiffs[i].DecodeFrom(d) - } - l = d.ReadPrefix() - ccd.FileContractDiffs = make([]FileContractDiff, l) - for i := 0; i < l; i++ { - ccd.FileContractDiffs[i].DecodeFrom(d) - } - l = d.ReadPrefix() - ccd.SiafundOutputDiffs = make([]SiafundOutputDiff, l) - for i := 0; i < l; i++ { - ccd.SiafundOutputDiffs[i].DecodeFrom(d) - } - l = d.ReadPrefix() - ccd.DelayedSiacoinOutputDiffs = make([]DelayedSiacoinOutputDiff, l) - for i := 0; i < l; i++ { - ccd.DelayedSiacoinOutputDiffs[i].DecodeFrom(d) - } - l = d.ReadPrefix() - ccd.SiafundPoolDiffs = make([]SiafundPoolDiff, l) - for i := 0; i < l; i++ { - ccd.SiafundPoolDiffs[i].DecodeFrom(d) - } -} - -// DecodeFrom implements types.DecoderFrom. -func (sod *SiacoinOutputDiff) DecodeFrom(d *types.Decoder) { - sod.Direction = DiffDirection(d.ReadBool()) - sod.ID.DecodeFrom(d) - sod.SiacoinOutput.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (fcd *FileContractDiff) DecodeFrom(d *types.Decoder) { - fcd.Direction = DiffDirection(d.ReadBool()) - fcd.ID.DecodeFrom(d) - fcd.FileContract.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (sfd *SiafundOutputDiff) DecodeFrom(d *types.Decoder) { - sfd.Direction = DiffDirection(d.ReadBool()) - sfd.ID.DecodeFrom(d) - var val types.Currency - val.DecodeFrom(d) - sfd.SiafundOutput.Value = val.Lo - sfd.SiafundOutput.Address.DecodeFrom(d) - sfd.ClaimStart.DecodeFrom(d) -} - -// DecodeFrom implements types.DecoderFrom. -func (dsod *DelayedSiacoinOutputDiff) DecodeFrom(d *types.Decoder) { - dsod.Direction = DiffDirection(d.ReadBool()) - dsod.ID.DecodeFrom(d) - dsod.SiacoinOutput.DecodeFrom(d) - dsod.MaturityHeight = d.ReadUint64() -} - -// DecodeFrom implements types.DecoderFrom. 
-func (spd *SiafundPoolDiff) DecodeFrom(d *types.Decoder) { - spd.Direction = DiffDirection(d.ReadBool()) - spd.Previous.DecodeFrom(d) - spd.Adjusted.DecodeFrom(d) -} - -// String returns the ConsensusChangeID as a string. -func (ccID ConsensusChangeID) String() string { - return types.Hash256(ccID).String() -} - -type ( - // A Target is a hash that a block's ID must be "less than" in order for - // the block to be considered valid. Miners vary the block's 'Nonce' field - // in order to brute-force such an ID. The inverse of a Target is called - // the "difficulty," because it is proportional to the amount of time - // required to brute-force the Target. - Target types.Hash256 -) - -// AddDifficulties returns the resulting target with the difficulty of 'x' and -// 'y' are added together. Note that the difficulty is the inverse of the -// target. The sum is defined by: -// -// sum(x, y) = 1/(1/x + 1/y) -func (x Target) AddDifficulties(y Target) (t Target) { - sumDifficulty := new(big.Rat).Add(x.Inverse(), y.Inverse()) - return RatToTarget(new(big.Rat).Inv(sumDifficulty)) -} - -// Cmp compares the difficulties of two targets. Note that the difficulty is -// the inverse of the target. The results are as follows: -// -// -1 if x < y -// 0 if x == y -// +1 if x > y -func (x Target) Cmp(y Target) int { - return x.Int().Cmp(y.Int()) -} - -// Difficulty returns the difficulty associated with a given target. -func (x Target) Difficulty() types.Currency { - buf := make([]byte, 16) - if x == (Target{}) { - rb := RootDepth.Int().Bytes() - copy(buf[16 - len(rb):], rb[:]) - return types.NewCurrency(binary.BigEndian.Uint64(buf[8:]), binary.BigEndian.Uint64(buf[:8])) - } - b := new(big.Int).Div(RootDepth.Int(), x.Int()).Bytes() - copy(buf[16 - len(b):], b[:]) - return types.NewCurrency(binary.BigEndian.Uint64(buf[8:]), binary.BigEndian.Uint64(buf[:8])) -} - -// Int converts a Target to a big.Int. 
-func (x Target) Int() *big.Int { - return new(big.Int).SetBytes(x[:]) -} - -// IntToTarget converts a big.Int to a Target. -func IntToTarget(i *big.Int) (t Target) { - // Check for negatives. - if i.Sign() < 0 { - return Target{} - } else { - // In the event of overflow, return the maximum. - if i.BitLen() > 256 { - return RootDepth - } - b := i.Bytes() - offset := len(t[:]) - len(b) - copy(t[offset:], b) - } - return -} - -// Inverse returns the inverse of a Target as a big.Rat -func (x Target) Inverse() *big.Rat { - return new(big.Rat).Inv(x.Rat()) -} - -// MulDifficulty multiplies the difficulty of a target by y. The product is defined by: -// y / x -func (x Target) MulDifficulty(y *big.Rat) (t Target) { - product := new(big.Rat).Mul(y, x.Inverse()) - product = product.Inv(product) - return RatToTarget(product) -} - -// Rat converts a Target to a big.Rat. -func (x Target) Rat() *big.Rat { - return new(big.Rat).SetInt(x.Int()) -} - -// RatToTarget converts a big.Rat to a Target. -func RatToTarget(r *big.Rat) (t Target) { - if r.Num().Sign() < 0 { - return Target{} - } else { - i := new(big.Int).Div(r.Num(), r.Denom()) - t = IntToTarget(i) - } - return -} - -// SubtractDifficulties returns the resulting target with the difficulty of 'x' -// is subtracted from the target with difficulty 'y'. Note that the difficulty -// is the inverse of the target. 
The difference is defined by: -// -// sum(x, y) = 1/(1/x - 1/y) -func (x Target) SubtractDifficulties(y Target) (t Target) { - sumDifficulty := new(big.Rat).Sub(x.Inverse(), y.Inverse()) - return RatToTarget(new(big.Rat).Inv(sumDifficulty)) -} diff --git a/modules/consensus/accept.go b/modules/consensus/accept.go deleted file mode 100644 index 16bcc12..0000000 --- a/modules/consensus/accept.go +++ /dev/null @@ -1,342 +0,0 @@ -package consensus - -import ( - "bytes" - "database/sql" - "errors" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -var ( - errDoSBlock = errors.New("block is known to be invalid") - errDatabaseError = errors.New("error querying database") - errNonLinearChain = errors.New("block set is not a contiguous chain") - errOrphan = errors.New("block has no known parent") -) - -// managedBroadcastBlock will broadcast a block to the consensus set's peers. -func (cs *ConsensusSet) managedBroadcastBlock(b types.Block) { - // Broadcast the block header to all peers. - go cs.gateway.Broadcast("RelayHeader", b.Header(), cs.gateway.Peers()) -} - -// validateHeaderAndBlock does some early, low computation verification on the -// block. Callers should not assume that validation will happen in a particular -// order. -func (cs *ConsensusSet) validateHeaderAndBlock(tx *sql.Tx, b types.Block, id types.BlockID) (parent *processedBlock, err error) { - // Check if the block is a DoS block - a known invalid block that is expensive - // to validate. - exists, err := checkDoSBlock(tx, id) - if err != nil { - return nil, err - } - if exists { - return nil, errDoSBlock - } - - // Check if the block is already known. - _, _, err = findBlockByID(tx, id) - if err != nil { - return nil, errDatabaseError - } - - // Check for the parent. 
- parent, exists, err = findBlockByID(tx, b.ParentID) - if err != nil { - return nil, errDatabaseError - } - if !exists { - return nil, errOrphan - } - - // Check that the timestamp is not too far in the past to be acceptable. - minTimestamp := cs.minimumValidChildTimestamp(tx, parent) - - err = cs.validateBlock(b, id, minTimestamp, parent.ChildTarget, parent.Height+1) - if err != nil { - return nil, err - } - return parent, nil -} - -// checkHeaderTarget returns true if the header's ID meets the given target. -func (cs *ConsensusSet) checkHeaderTarget(h types.BlockHeader, target modules.Target) bool { - blockHash := h.ID() - return bytes.Compare(target[:], blockHash[:]) >= 0 -} - -// validateHeader does some early, low computation verification on the header -// to determine if the block should be downloaded. Callers should not assume -// that validation will happen in a particular order. -func (cs *ConsensusSet) validateHeader(tx *sql.Tx, h types.BlockHeader) error { - // Check if the block is a DoS block - a known invalid block that is expensive - // to validate. - id := h.ID() - exists, err := checkDoSBlock(tx, id) - if err != nil { - return err - } - if exists { - return errDoSBlock - } - - // Check if the block is already known. - _, _, err = findBlockByID(tx, id) - if err != nil { - return errDatabaseError - } - - // Check for the parent. - parent, exists, err := findBlockByID(tx, h.ParentID) - if err != nil { - return errDatabaseError - } - if !exists { - return errOrphan - } - - // Check that the nonce is a legal nonce. - if parent.Height+1 >= modules.ASICHardforkHeight && h.Nonce%modules.ASICHardforkFactor != 0 { - return errors.New("block does not meet nonce requirements") - } - // Check that the target of the new block is sufficient. - if !cs.checkHeaderTarget(h, parent.ChildTarget) { - return modules.ErrBlockUnsolved - } - - // TODO: check if the block is a non extending block once headers-first - // downloads are implemented. 
- - // Check that the timestamp is not too far in the past to be acceptable. - minTimestamp := cs.minimumValidChildTimestamp(tx, parent) - if minTimestamp.After(h.Timestamp) { - return ErrEarlyTimestamp - } - - // Check if the block is in the extreme future. We make a distinction between - // future and extreme future because there is an assumption that by the time - // the extreme future arrives, this block will no longer be a part of the - // longest fork because it will have been ignored by all of the miners. - if h.Timestamp.Unix() > types.CurrentTimestamp().Unix()+modules.ExtremeFutureThreshold { - return ErrExtremeFutureTimestamp - } - - // We do not check if the header is in the near future here, because we want - // to get the corresponding block as soon as possible, even if the block is in - // the near future. - - return nil -} - -// addBlockToTree inserts a block into the blockNode tree by adding it to its -// parent's list of children. If the new blockNode is heavier than the current -// node, the blockchain is forked to put the new block and its parents at the -// tip. An error will be returned if block verification fails or if the block -// does not extend the longest fork. -// -// addBlockToTree might need to modify the database while returning an error -// on the block. Such errors are handled outside by the caller. -func (cs *ConsensusSet) addBlockToTree(tx *sql.Tx, b types.Block, parent *processedBlock) (ce changeEntry, err error) { - // Prepare the child processed block associated with the parent block. - newNode, err := cs.newChild(tx, parent, b) - if err != nil { - return - } - - // Check whether the new node is part of a chain that is heavier than the - // current node. If not, return ErrNonExtending and don't fork the - // blockchain. 
- currentNode := currentProcessedBlock(tx) - if !newNode.heavierThan(currentNode) { - return changeEntry{}, modules.ErrNonExtendingBlock - } - - // Fork the blockchain and put the new heaviest block at the tip of the - // chain. - var revertedBlocks, appliedBlocks []*processedBlock - revertedBlocks, appliedBlocks, err = cs.forkBlockchain(tx, newNode) - if err != nil { - return changeEntry{}, err - } - for _, rn := range revertedBlocks { - ce.RevertedBlocks = append(ce.RevertedBlocks, rn.Block.ID()) - } - for _, an := range appliedBlocks { - ce.AppliedBlocks = append(ce.AppliedBlocks, an.Block.ID()) - } - err = appendChangeLog(tx, ce) - if err != nil { - return changeEntry{}, err - } - - return ce, nil -} - -// threadedSleepOnFutureBlock will sleep until the timestamp of a future block -// has arrived. -// -// TODO: An attacker can broadcast a future block multiple times, resulting in a -// goroutine spinup for each future block. Need to prevent that. -// -// TODO: An attacker could produce a very large number of future blocks, -// consuming memory. Need to prevent that. -func (cs *ConsensusSet) threadedSleepOnFutureBlock(b types.Block) { - // Add this thread to the threadgroup. - err := cs.tg.Add() - if err != nil { - return - } - defer cs.tg.Done() - - // Perform a soft-sleep while we wait for the block to become valid. - select { - case <-cs.tg.StopChan(): - return - case <-time.After(time.Duration(b.Timestamp.Unix()-(types.CurrentTimestamp().Unix()+modules.FutureThreshold)) * time.Second): - _, err := cs.managedAcceptBlocks([]types.Block{b}) - if err != nil { - cs.log.Println("WARN: failed to accept a future block:", err) - } - cs.managedBroadcastBlock(b) - } -} - -// managedAcceptBlocks will try to add blocks to the consensus set. If the -// blocks do not extend the longest currently known chain, an error is -// returned but the blocks are still kept in memory. 
If the blocks extend a fork -// such that the fork becomes the longest currently known chain, the consensus -// set will reorganize itself to recognize the new longest fork. Accepted -// blocks are not relayed. -// -// Typically AcceptBlock should be used so that the accepted block is relayed. -// This method is typically only be used when there would otherwise be multiple -// consecutive calls to AcceptBlock with each successive call accepting the -// child block of the previous call. -func (cs *ConsensusSet) managedAcceptBlocks(blocks []types.Block) (blockchainExtended bool, err error) { - // Grab a lock on the consensus set. - cs.mu.Lock() - defer cs.mu.Unlock() - - // Make sure that blocks are consecutive. Though this isn't a strict - // requirement, if blocks are not consecutive then it becomes a lot harder - // to maintain correctness when adding multiple blocks. - // - // This is the first time that IDs on the blocks have been computed. - blockIDs := make([]types.BlockID, 0, len(blocks)) - for i := 0; i < len(blocks); i++ { - blockIDs = append(blockIDs, blocks[i].ID()) - if i > 0 && blocks[i].ParentID != blockIDs[i-1] { - return false, errNonLinearChain - } - } - - // Verify the headers for every block, throw out known blocks, and the - // invalid blocks (which includes the children of invalid blocks). - chainExtended := false - changes := make([]changeEntry, 0, len(blocks)) - tx, err := cs.db.Begin() - if err != nil { - return false, err - } - setErr := func(tx *sql.Tx) error { - for i := 0; i < len(blocks); i++ { - // Start by checking the header of the block. - parent, err := cs.validateHeaderAndBlock(tx, blocks[i], blockIDs[i]) - if modules.ContainsError(err, ErrFutureTimestamp) { - // Queue the block to be tried again if it is a future block. - go cs.threadedSleepOnFutureBlock(blocks[i]) - } - if err != nil { - return err - } - - // Try adding the block to consensus. 
- changeEntry, err := cs.addBlockToTree(tx, blocks[i], parent) - if err == nil { - changes = append(changes, changeEntry) - chainExtended = true - } - if modules.ContainsError(err, modules.ErrNonExtendingBlock) { - err = nil - } - if err != nil { - return err - } - - // Sanity check - we should never apply fewer blocks than we revert. - if len(changeEntry.AppliedBlocks) < len(changeEntry.RevertedBlocks) { - err := errors.New("after adding a change entry, there are more reverted blocks than applied ones") - cs.log.Severe(err) - return err - } - } - - return nil - }(tx) - - if setErr != nil { - if errors.Is(setErr, errExternalRevert) { - block := currentProcessedBlock(tx) - cs.log.Println("Reverting an invalid block:", block.Height, block.Block.ID()) - if err := commitDiffSet(tx, block, modules.DiffRevert); err != nil { - return false, err - } - return false, tx.Commit() - } - if len(changes) == 0 { - cs.log.Println("Consensus received an invalid block:", setErr) - } else { - cs.log.Println("Consensus received a chain of blocks, where one was valid, but others were not:", setErr) - } - tx.Rollback() - return false, setErr - } - - // Stop here if the blocks did not extend the longest blockchain. - if !chainExtended { - if err := tx.Commit(); err != nil { - return false, err - } - return false, modules.ErrNonExtendingBlock - } - - // Commit all changes before updating the subscribers. - if err := tx.Commit(); err != nil { - return false, err - } - - // Send any changes to subscribers. - for i := 0; i < len(changes); i++ { - cs.updateSubscribers(changes[i]) - } - - return chainExtended, nil -} - -// AcceptBlock will try to add a block to the consensus set. If the block does -// not extend the longest currently known chain, an error is returned but the -// block is still kept in memory. If the block extends a fork such that the -// fork becomes the longest currently known chain, the consensus set will -// reorganize itself to recognize the new longest fork. 
If a block is accepted -// without error, it will be relayed to all connected peers. This function -// should only be called for new blocks. -func (cs *ConsensusSet) AcceptBlock(b types.Block) error { - err := cs.tg.Add() - if err != nil { - return err - } - defer cs.tg.Done() - - chainExtended, err := cs.managedAcceptBlocks([]types.Block{b}) - if err != nil { - return err - } - if chainExtended { - cs.managedBroadcastBlock(b) - } - return nil -} diff --git a/modules/consensus/alert.go b/modules/consensus/alert.go deleted file mode 100644 index 6158280..0000000 --- a/modules/consensus/alert.go +++ /dev/null @@ -1,10 +0,0 @@ -package consensus - -import ( - "github.com/mike76-dev/sia-satellite/modules" -) - -// Alerts implements the Alerter interface for the consensusset. -func (c *ConsensusSet) Alerts() (crit, err, warn, info []modules.Alert) { - return -} diff --git a/modules/consensus/block_rules.go b/modules/consensus/block_rules.go deleted file mode 100644 index 7e0f5c2..0000000 --- a/modules/consensus/block_rules.go +++ /dev/null @@ -1,61 +0,0 @@ -package consensus - -import ( - "database/sql" - "sort" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -type ( - // timestampSlice is an array of timestamps. - timestampSlice []time.Time -) - -// Len is part of sort.Interface. -func (ts timestampSlice) Len() int { - return len(ts) -} - -// Less is part of sort.Interface. -func (ts timestampSlice) Less(i, j int) bool { - return ts[i].Before(ts[j]) -} - -// Swap is part of sort.Interface. -func (ts timestampSlice) Swap(i, j int) { - ts[i], ts[j] = ts[j], ts[i] -} - -// minimumValidChildTimestamp returns the earliest timestamp that a child node -// can have while still being valid. -func (cs *ConsensusSet) minimumValidChildTimestamp(tx *sql.Tx, pb *processedBlock) time.Time { - // Get the previous MedianTimestampWindow timestamps. 
- windowTimes := make(timestampSlice, modules.MedianTimestampWindow) - windowTimes[0] = pb.Block.Timestamp - parentID := pb.Block.ParentID - var timestamp time.Time - var err error - for i := uint64(1); i < modules.MedianTimestampWindow; i++ { - // If the genesis block is 'parent', use the genesis block timestamp - // for all remaining times. - if parentID == (types.BlockID{}) { - windowTimes[i] = windowTimes[i - 1] - continue - } - - // Get the next parent. - parentID, timestamp, err = getParentID(tx, parentID) - if err != nil { - cs.log.Println("ERROR: unable to get parent ID:", err) - } - windowTimes[i] = timestamp - } - sort.Sort(windowTimes) - - // Return the median of the sorted timestamps. - return windowTimes[len(windowTimes) / 2] -} diff --git a/modules/consensus/block_validation.go b/modules/consensus/block_validation.go deleted file mode 100644 index e17d034..0000000 --- a/modules/consensus/block_validation.go +++ /dev/null @@ -1,97 +0,0 @@ -package consensus - -import ( - "bytes" - "errors" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -var ( - // ErrBadMinerPayouts is returned when the miner payout does not equal the - // block subsidy. - ErrBadMinerPayouts = errors.New("miner payout sum does not equal block subsidy") - // ErrEarlyTimestamp is returned when the block's timestamp is too early. - ErrEarlyTimestamp = errors.New("block timestamp is too early") - // ErrExtremeFutureTimestamp is returned when the block's timestamp is too - // far in the future. - ErrExtremeFutureTimestamp = errors.New("block timestamp too far in future, discarded") - // ErrFutureTimestamp is returned when the block's timestamp is too far in - // the future to be used now but it's saved for future use. - ErrFutureTimestamp = errors.New("block timestamp too far in future, but saved for later use") - // ErrLargeBlock is returned when the block is too large to be accepted. 
- ErrLargeBlock = errors.New("block is too large to be accepted") -) - -// checkMinerPayouts compares a block's miner payouts to the block's subsidy and -// returns true if they are equal. -func checkMinerPayouts(b types.Block, height uint64) bool { - // Add up the payouts and check that all values are legal. - var payoutSum types.Currency - for _, payout := range b.MinerPayouts { - if payout.Value.IsZero() { - return false - } - payoutSum = payoutSum.Add(payout.Value) - } - return modules.CalculateSubsidy(b, height).Equals(payoutSum) -} - -// checkTarget returns true if the block's ID meets the given target. -func checkTarget(b types.Block, id types.BlockID, target modules.Target) bool { - return bytes.Compare(target[:], id[:]) >= 0 -} - -// validateBlock validates a block against a minimum timestamp, a block target, -// and a block height. Returns nil if the block is valid and an appropriate -// error otherwise. -func (cs *ConsensusSet) validateBlock(b types.Block, id types.BlockID, minTimestamp time.Time, target modules.Target, height uint64) error { - // Check that the timestamp is not too far in the past to be acceptable. - if minTimestamp.After(b.Timestamp) { - return ErrEarlyTimestamp - } - - // Check that the nonce is a legal nonce. - if height >= modules.ASICHardforkHeight && b.Nonce % modules.ASICHardforkFactor != 0 { - return errors.New("block does not meet nonce requirements") - } - // Check that the target of the new block is sufficient. - if !checkTarget(b, id, target) { - return modules.ErrBlockUnsolved - } - - // Check that the block is below the size limit. - var buf bytes.Buffer - e := types.NewEncoder(&buf) - b.EncodeTo(e) - e.Flush() - blockSize := buf.Len() - if uint64(blockSize) > modules.BlockSizeLimit { - return ErrLargeBlock - } - - // Check if the block is in the extreme future. 
We make a distinction between - // future and extreme future because there is an assumption that by the time - // the extreme future arrives, this block will no longer be a part of the - // longest fork because it will have been ignored by all of the miners. - if b.Timestamp.Unix() > time.Now().Unix() + modules.ExtremeFutureThreshold { - return ErrExtremeFutureTimestamp - } - - // Verify that the miner payouts are valid. - if !checkMinerPayouts(b, height) { - return ErrBadMinerPayouts - } - - // Check if the block is in the near future, but too far to be acceptable. - // This is the last check because it's an expensive check, and not worth - // performing if the payouts are incorrect. - if b.Timestamp.Unix() > time.Now().Unix() + modules.FutureThreshold { - return ErrFutureTimestamp - } - - return nil -} diff --git a/modules/consensus/changelog.go b/modules/consensus/changelog.go deleted file mode 100644 index dcf9ba6..0000000 --- a/modules/consensus/changelog.go +++ /dev/null @@ -1,145 +0,0 @@ -package consensus - -import ( - "database/sql" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -type ( - // changeEntry records a single atomic change to the consensus set. - changeEntry struct { - RevertedBlocks []types.BlockID - AppliedBlocks []types.BlockID - } - - // changeNode contains a change entry and a pointer to the next change - // entry, and is the object that gets stored in the database. - changeNode struct { - Entry changeEntry - Next modules.ConsensusChangeID - } -) - -// EncodeTo implements types.EncoderTo. -func (ce *changeEntry) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(ce.RevertedBlocks)) - for _, rb := range ce.RevertedBlocks { - rb.EncodeTo(e) - } - e.WritePrefix(len(ce.AppliedBlocks)) - for _, ab := range ce.AppliedBlocks { - ab.EncodeTo(e) - } -} - -// DecodeFrom implements types.DecoderFrom. 
-func (ce *changeEntry) DecodeFrom(d *types.Decoder) { - ce.RevertedBlocks = make([]types.BlockID, d.ReadUint64()) - for i := 0; i < len(ce.RevertedBlocks); i++ { - ce.RevertedBlocks[i].DecodeFrom(d) - } - ce.AppliedBlocks = make([]types.BlockID, d.ReadUint64()) - for i := 0; i < len(ce.AppliedBlocks); i++ { - ce.AppliedBlocks[i].DecodeFrom(d) - } -} - -// EncodeTo implements types.EncoderTo. -func (cn *changeNode) EncodeTo(e *types.Encoder) { - cn.Entry.EncodeTo(e) - e.Write(cn.Next[:]) -} - -// DecodeFrom implements types.DecoderFrom. -func (cn *changeNode) DecodeFrom(d *types.Decoder) { - cn.Entry.DecodeFrom(d) - d.Read(cn.Next[:]) -} - -// appendChangeLog adds a new change entry to the change log. -func appendChangeLog(tx *sql.Tx, ce changeEntry) error { - // Insert the change entry. - ceid := ce.ID() - cn := changeNode{Entry: ce, Next: modules.ConsensusChangeID{}} - err := saveConsensusChange(tx, ceid, cn) - if err != nil { - return err - } - - // Update the tail node to point to the new change entry as the next entry. - tailID := changeLogTailID(tx) - if tailID != (modules.ConsensusChangeID{}) { - // Get the old tail node. - tailCN, err := loadConsensusChange(tx, tailID) - if err != nil { - return err - } - - // Point the 'next' of the old tail node to the new tail node and - // insert. - tailCN.Next = ceid - err = saveConsensusChange(tx, tailID, tailCN) - if err != nil { - return err - } - } - - // Update the tail id. - return setChangeLogTailID(tx, ceid) -} - -// getEntry returns the change entry with a given id, using a bool to indicate -// existence. -func getEntry(tx *sql.Tx, id modules.ConsensusChangeID) (ce changeEntry, exists bool) { - cn, err := loadConsensusChange(tx, id) - if err != nil { - return changeEntry{}, false - } - return cn.Entry, true -} - -// ID returns the id of a change entry. 
-func (ce *changeEntry) ID() modules.ConsensusChangeID { - h := types.NewHasher() - ce.EncodeTo(h.E) - return modules.ConsensusChangeID(h.Sum()) -} - -// NextEntry returns the entry after the current entry. -func (ce *changeEntry) NextEntry(tx *sql.Tx) (ne changeEntry, exists bool) { - ceid := ce.ID() - cn, err := loadConsensusChange(tx, ceid) - if err != nil { - return changeEntry{}, false - } - return getEntry(tx, cn.Next) -} - -// createChangeLog assumes that no change log exists and creates a new one. -func (cs *ConsensusSet) createChangeLog(tx *sql.Tx) error { - // Add the genesis block as the first entry of the change log. - ge := cs.genesisEntry() - geid := ge.ID() - cn := changeNode{ - Entry: ge, - Next: modules.ConsensusChangeID{}, - } - - err := saveConsensusChange(tx, geid, cn) - if err != nil { - return err - } - - // Update the tail id. - return setChangeLogTailID(tx, geid) -} - -// genesisEntry returns the id of the genesis block log entry. -func (cs *ConsensusSet) genesisEntry() changeEntry { - return changeEntry{ - AppliedBlocks: []types.BlockID{cs.blockRoot.Block.ID()}, - } -} diff --git a/modules/consensus/consensusset.go b/modules/consensus/consensusset.go deleted file mode 100644 index bf4b036..0000000 --- a/modules/consensus/consensusset.go +++ /dev/null @@ -1,441 +0,0 @@ -package consensus - -import ( - "database/sql" - "errors" - "sync" - "time" - - siasync "github.com/mike76-dev/sia-satellite/internal/sync" - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" - - "go.sia.tech/core/types" -) - -var ( - errNilDB = errors.New("cannot have a nil database as input") - errNilGateway = errors.New("cannot have a nil gateway as input") -) - -// The ConsensusSet is the object responsible for tracking the current status -// of the blockchain. Broadly speaking, it is responsible for maintaining -// consensus. It accepts blocks and constructs a blockchain, forking when -// necessary. 
-type ConsensusSet struct { - // The gateway manages peer connections and keeps the consensus set - // synchronized to the rest of the network. - gateway modules.Gateway - - // The block root contains the genesis block. - blockRoot processedBlock - - // Subscribers to the consensus set will receive a changelog every time - // there is an update to the consensus set. At initialization, they receive - // all changes that they are missing. - // - // Memory: A consensus set typically has fewer than 10 subscribers, and - // subscription typically happens entirely at startup. This slice is - // unlikely to grow beyond 1kb, and cannot by manipulated by an attacker as - // the function of adding a subscriber should not be exposed. - subscribers []modules.ConsensusSetSubscriber - - // checkingConsistency is a bool indicating whether or not a consistency - // check is in progress. The consistency check logic call itself, resulting - // in infinite loops. This bool prevents that while still allowing for full - // granularity consistency checks. Previously, consistency checks were only - // performed after a full reorg, but now they are performed after every - // block. - checkingConsistency bool - - // synced is true if initial blockchain download has finished. It indicates - // whether the consensus set is synced with the network. - synced bool - - // Utilities. - db *sql.DB - log *persist.Logger - mu sync.RWMutex - tg siasync.ThreadGroup -} - -// consensusSetBlockingStartup handles the blocking portion of New. -func consensusSetBlockingStartup(gateway modules.Gateway, db *sql.DB, dir string) (*ConsensusSet, error) { - // Check for nil dependencies. - if db == nil { - return nil, errNilDB - } - if gateway == nil { - return nil, errNilGateway - } - - // Create the ConsensusSet object. 
- cs := &ConsensusSet{ - gateway: gateway, - db: db, - - blockRoot: processedBlock{ - Block: modules.GenesisBlock, - ChildTarget: modules.RootTarget, - Depth: modules.RootDepth, - - DiffsGenerated: true, - }, - } - - // Create the diffs for the genesis transaction outputs. - for _, transaction := range modules.GenesisBlock.Transactions { - // Create the diffs for the genesis siacoin outputs. - for i, siacoinOutput := range transaction.SiacoinOutputs { - scid := transaction.SiacoinOutputID(i) - scod := modules.SiacoinOutputDiff{ - Direction: modules.DiffApply, - ID: scid, - SiacoinOutput: siacoinOutput, - } - cs.blockRoot.SiacoinOutputDiffs = append(cs.blockRoot.SiacoinOutputDiffs, scod) - } - - // Create the diffs for the genesis Siafund outputs. - for i, siafundOutput := range transaction.SiafundOutputs { - sfid := transaction.SiafundOutputID(i) - sfod := modules.SiafundOutputDiff{ - Direction: modules.DiffApply, - ID: sfid, - SiafundOutput: siafundOutput, - } - cs.blockRoot.SiafundOutputDiffs = append(cs.blockRoot.SiafundOutputDiffs, sfod) - } - } - - // Initialize the consensus persistence structures. - err := cs.initPersist(dir) - if err != nil { - return nil, err - } - return cs, nil -} - -// consensusSetAsyncStartup handles the async portion of New. -func consensusSetAsyncStartup(cs *ConsensusSet, bootstrap bool) error { - // Sync with the network. - if bootstrap { - err := cs.managedInitialBlockchainDownload() - if err != nil { - return err - } - } - - // Register RPCs. - cs.gateway.RegisterRPC("SendBlocks", cs.rpcSendBlocks) - cs.gateway.RegisterRPC("RelayHeader", cs.threadedRPCRelayHeader) - cs.gateway.RegisterRPC("SendBlk", cs.rpcSendBlk) - cs.gateway.RegisterConnectCall("SendBlocks", cs.threadedReceiveBlocks) - cs.tg.OnStop(func() { - cs.gateway.UnregisterRPC("SendBlocks") - cs.gateway.UnregisterRPC("RelayHeader") - cs.gateway.UnregisterRPC("SendBlk") - cs.gateway.UnregisterConnectCall("SendBlocks") - }) - - // Mark that we are synced with the network. 
- cs.mu.Lock() - cs.synced = true - cs.mu.Unlock() - return nil -} - -// New returns a new ConsensusSet, containing at least the genesis block. If -// there is an existing block database, it will be loaded. -func New(db *sql.DB, gateway modules.Gateway, bootstrap bool, dir string) (*ConsensusSet, <-chan error) { - // Handle blocking consensus startup first. - errChan := make(chan error, 1) - cs, err := consensusSetBlockingStartup(gateway, db, dir) - if err != nil { - errChan <- err - return nil, errChan - } - - // Non-blocking consensus startup. - go func() { - defer close(errChan) - err := cs.tg.Add() - if err != nil { - errChan <- err - return - } - defer cs.tg.Done() - - err = consensusSetAsyncStartup(cs, bootstrap) - if err != nil { - errChan <- err - return - } - }() - return cs, errChan -} - -// BlockAtHeight returns the block at a given height. -func (cs *ConsensusSet) BlockAtHeight(height uint64) (block types.Block, exists bool) { - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return types.Block{}, false - } - - id, err := getBlockAtHeight(tx, height) - if err != nil { - if !errors.Is(err, sql.ErrNoRows) { - cs.log.Println("ERROR: unable to find block:", err) - } - tx.Rollback() - return types.Block{}, false - } - - pb, exists, err := findBlockByID(tx, id) - if err != nil { - cs.log.Println("ERROR: unable to find block:", err) - tx.Rollback() - return types.Block{}, false - } - if !exists { - cs.log.Println("ERROR: unable to find block", id) - tx.Rollback() - return types.Block{}, false - } - - tx.Commit() - return pb.Block, exists -} - -// BlockByID returns the block for a given BlockID. 
-func (cs *ConsensusSet) BlockByID(id types.BlockID) (block types.Block, height uint64, exists bool) { - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return types.Block{}, 0, false - } - - pb, exists, err := findBlockByID(tx, id) - if err != nil { - cs.log.Println("ERROR: unable to find block:", err) - tx.Rollback() - return types.Block{}, 0, false - } - - tx.Commit() - return pb.Block, pb.Height, exists -} - -// ChildTarget returns the target for the child of a block. -func (cs *ConsensusSet) ChildTarget(id types.BlockID) (target modules.Target, exists bool) { - err := cs.tg.Add() - if err != nil { - return modules.Target{}, false - } - defer cs.tg.Done() - - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return modules.Target{}, false - } - - pb, exists, err := findBlockByID(tx, id) - if err != nil { - cs.log.Println("ERROR: unable to find block:", err) - tx.Rollback() - return modules.Target{}, false - } - - tx.Commit() - return pb.ChildTarget, exists -} - -// Close safely closes the consensus set. -func (cs *ConsensusSet) Close() error { - return cs.tg.Stop() -} - -// managedCurrentBlock returns the latest block in the heaviest known blockchain. -func (cs *ConsensusSet) managedCurrentBlock() (block types.Block) { - cs.mu.RLock() - defer cs.mu.RUnlock() - - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return types.Block{} - } - - pb := currentProcessedBlock(tx) - if pb == nil { - tx.Rollback() - return types.Block{} - } - - tx.Commit() - return pb.Block -} - -// CurrentBlock returns the latest block in the heaviest known blockchain. 
-func (cs *ConsensusSet) CurrentBlock() (block types.Block) { - err := cs.tg.Add() - if err != nil { - return types.Block{} - } - defer cs.tg.Done() - - // Block until a lock can be grabbed on the consensus set, indicating that - // all modules have received the most recent block. The lock is held so that - // there are no race conditions when trying to synchronize nodes. - cs.mu.Lock() - defer cs.mu.Unlock() - - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return types.Block{} - } - - pb := currentProcessedBlock(tx) - if pb == nil { - tx.Rollback() - return types.Block{} - } - - tx.Commit() - return pb.Block -} - -// Height returns the height of the consensus set. -func (cs *ConsensusSet) Height() (height uint64) { - err := cs.tg.Add() - if err != nil { - return 0 - } - defer cs.tg.Done() - - // Block until a lock can be grabbed on the consensus set, indicating that - // all modules have received the most recent block. The lock is held so that - // there are no race conditions when trying to synchronize nodes. - cs.mu.Lock() - defer cs.mu.Unlock() - - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return 0 - } - - height = blockHeight(tx) - tx.Commit() - - return height -} - -// InCurrentPath returns true if the block presented is in the current path, -// false otherwise. 
-func (cs *ConsensusSet) InCurrentPath(id types.BlockID) (inPath bool) { - err := cs.tg.Add() - if err != nil { - return false - } - defer cs.tg.Done() - - tx, err := cs.db.Begin() - if err != nil { - return false - } - - pb, exists, err := findBlockByID(tx, id) - if err != nil || !exists { - tx.Rollback() - return false - } - - pathID, err := getBlockAtHeight(tx, pb.Height) - if err != nil { - tx.Rollback() - return false - } - - tx.Commit() - return pathID == id -} - -// MinimumValidChildTimestamp returns the earliest timestamp that the next block -// can have in order for it to be considered valid. -func (cs *ConsensusSet) MinimumValidChildTimestamp(id types.BlockID) (timestamp time.Time, exists bool) { - err := cs.tg.Add() - if err != nil { - return - } - defer cs.tg.Done() - - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return - } - - pb, exists, err := findBlockByID(tx, id) - if err != nil { - tx.Rollback() - return - } - - tx.Commit() - timestamp = cs.minimumValidChildTimestamp(tx, pb) - exists = true - - return -} - -// StorageProofSegment returns the segment to be used in the storage proof for -// a given file contract. -func (cs *ConsensusSet) StorageProofSegment(fcid types.FileContractID) (index uint64, err error) { - err = cs.tg.Add() - if err != nil { - return 0, err - } - defer cs.tg.Done() - - tx, err := cs.db.Begin() - if err != nil { - return 0, err - } - - index, err = storageProofSegment(tx, fcid) - if err != nil { - tx.Rollback() - return 0, err - } - - tx.Commit() - return index, nil -} - -// FoundationUnlockHashes returns the current primary and failsafe Foundation -// UnlockHashes. 
-func (cs *ConsensusSet) FoundationUnlockHashes() (primary, failsafe types.Address) { - if err := cs.tg.Add(); err != nil { - return - } - defer cs.tg.Done() - - tx, err := cs.db.Begin() - if err != nil { - return - } - - primary, failsafe, err = getFoundationUnlockHashes(tx) - if err != nil { - cs.log.Println("ERROR: unable to get the Foundation unlock hashes:", err) - tx.Rollback() - return - } - - tx.Commit() - return -} diff --git a/modules/consensus/difficulty.go b/modules/consensus/difficulty.go deleted file mode 100644 index 75ee607..0000000 --- a/modules/consensus/difficulty.go +++ /dev/null @@ -1,231 +0,0 @@ -package consensus - -import ( - "database/sql" - "encoding/binary" - "errors" - "math/big" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// Errors returned by this file. -var ( - // errOakHardforkIncompatibility is the error returned if Oak initialization - // cannot begin because the consensus database was not upgraded before the - // hardfork height. - errOakHardforkIncompatibility = errors.New("difficulty adjustment hardfork incompatibility detected") -) - -// childTargetOak sets the child target based on the total time delta and total -// hashrate of the parent block. The deltas are known for the child block, -// however we do not use the child block deltas because that would allow the -// child block to influence the target of the following block, which makes abuse -// easier in selfish mining scenarios. -func (cs *ConsensusSet) childTargetOak(parentTotalTime int64, parentTotalTarget, currentTarget modules.Target, parentHeight uint64, parentTimestamp time.Time) modules.Target { - // Determine the delta of the current total time vs. the desired total time. - // The desired total time is the difference between the genesis block - // timestamp and the current block timestamp. - var delta int64 - if parentHeight < modules.OakHardforkFixBlock { - // This is the original code. 
It is incorrect, because it is comparing - // 'expectedTime', an absolute value, to 'parentTotalTime', a value - // which gets compressed every block. The result is that 'expectedTime' - // is substantially larger than 'parentTotalTime' always, and that the - // shifter is always reading that blocks have been coming out far too - // quickly. - expectedTime := int64(modules.BlockFrequency * parentHeight) - delta = expectedTime - parentTotalTime - } else { - // This is the correct code. The expected time is an absolute time based - // on the genesis block, and the delta is an absolute time based on the - // timestamp of the parent block. - // - // Rules elsewhere in consensus ensure that the timestamp of the parent - // block has not been manipulated by more than a few hours, which is - // accurate enough for this logic to be safe. - expectedTime := int64(modules.BlockFrequency*parentHeight) + modules.GenesisTimestamp.Unix() - delta = expectedTime - parentTimestamp.Unix() - } - // Convert the delta in to a target block time. - square := delta * delta - if delta < 0 { - // If the delta is negative, restore the negative value. - square *= -1 - } - shift := square / 10e6 // 10e3 second delta leads to 10 second shift. - targetBlockTime := int64(modules.BlockFrequency) + shift - - // Clamp the block time to 1/3 and 3x the target block time. - if targetBlockTime < int64(modules.BlockFrequency)/modules.OakMaxBlockShift { - targetBlockTime = int64(modules.BlockFrequency) / modules.OakMaxBlockShift - } - if targetBlockTime > int64(modules.BlockFrequency)*modules.OakMaxBlockShift { - targetBlockTime = int64(modules.BlockFrequency) * modules.OakMaxBlockShift - } - - // Determine the hashrate using the total time and total target. Set a - // minimum total time of 1 to prevent divide by zero and underflows. - if parentTotalTime < 1 { - parentTotalTime = 1 - } - visibleHashrate := parentTotalTarget.Difficulty().Div64(uint64(parentTotalTime)) // Hashes per second. 
- // Handle divide by zero risks. - if visibleHashrate.IsZero() { - visibleHashrate = visibleHashrate.Add(types.NewCurrency64(1)) - } - if targetBlockTime == 0 { - // This code can only possibly be triggered if the block frequency is - // less than 3, but during testing the block frequency is 1. - targetBlockTime = 1 - } - - // Determine the new target by multiplying the visible hashrate by the - // target block time. Clamp it to a 0.4% difficulty adjustment. - maxNewTarget := currentTarget.MulDifficulty(modules.OakMaxRise) // Max = difficulty increase (target decrease) - minNewTarget := currentTarget.MulDifficulty(modules.OakMaxDrop) // Min = difficulty decrease (target increase) - newTarget := modules.RatToTarget(new(big.Rat).SetFrac(modules.RootDepth.Int(), visibleHashrate.Mul64(uint64(targetBlockTime)).Big())) - if newTarget.Cmp(maxNewTarget) < 0 && parentHeight+1 != modules.ASICHardforkHeight { - newTarget = maxNewTarget - } - if newTarget.Cmp(minNewTarget) > 0 && parentHeight+1 != modules.ASICHardforkHeight { - // This can only possibly trigger if the BlockFrequency is less than 3 - // seconds. - newTarget = minNewTarget - } - return newTarget -} - -// getBlockTotals returns the block totals values that get stored in -// storeBlockTotals. -func (cs *ConsensusSet) getBlockTotals(tx *sql.Tx, id types.BlockID) (totalTime int64, totalTarget modules.Target, err error) { - totalsBytes := make([]byte, 40) - err = tx.QueryRow(`SELECT bytes FROM cs_oak WHERE bid = ?`, id[:]).Scan(&totalsBytes) - if err != nil { - cs.log.Println("ERROR: unable to retrieve Oak data:", err) - return - } - totalTime = int64(binary.LittleEndian.Uint64(totalsBytes[:8])) - copy(totalTarget[:], totalsBytes[8:]) - return -} - -// storeBlockTotals computes the new total time and total target for the current -// block and stores that new time in the database. It also returns the new -// totals. 
-func (cs *ConsensusSet) storeBlockTotals(tx *sql.Tx, currentHeight uint64, currentBlockID types.BlockID, prevTotalTime int64, parentTimestamp, currentTimestamp time.Time, prevTotalTarget, targetOfCurrentBlock modules.Target) (newTotalTime int64, newTotalTarget modules.Target, err error) { - // Reset the prevTotalTime to a delta of zero just before the hardfork. - // - // NOTICE: This code is broken, an incorrectly executed hardfork. The - // correct thing to do was to not put in these 3 lines of code. It is - // correct to not have them. - // - // This code is incorrect, and introduces an unfortunate drop in difficulty, - // because this is an uncompressed prevTotalTime, but really it should be - // getting set to a compressed prevTotalTime. And, actually, a compressed - // prevTotalTime doesn't have much meaning, so this code block shouldn't be - // here at all. But... this is the code that was running for the block - // 135,000 hardfork, so this code needs to stay. With the standard - // constants, it should cause a disruptive bump that lasts only a few days. - // - // The disruption will be complete well before we can deploy a fix, so - // there's no point in fixing it. - if currentHeight == modules.OakHardforkBlock-1 { - prevTotalTime = int64(modules.BlockFrequency * currentHeight) - } - - // For each value, first multiply by the decay, and then add in the new - // delta. - newTotalTime = (prevTotalTime * modules.OakDecayNum / modules.OakDecayDenom) + currentTimestamp.Unix() - parentTimestamp.Unix() - newTotalTarget = prevTotalTarget.MulDifficulty(big.NewRat(modules.OakDecayNum, modules.OakDecayDenom)).AddDifficulties(targetOfCurrentBlock) - - // At the hardfork height to adjust the acceptable nonce conditions, reset - // the total time and total target. 
- if currentHeight+1 == modules.ASICHardforkHeight { - newTotalTime = modules.ASICHardforkTotalTime - newTotalTarget = modules.ASICHardforkTotalTarget - } - - // Store the new total time and total target in the database at the - // appropriate id. - totalsBytes := make([]byte, 40) - binary.LittleEndian.PutUint64(totalsBytes[:8], uint64(newTotalTime)) - copy(totalsBytes[8:], newTotalTarget[:]) - _, err = tx.Exec(` - INSERT INTO cs_oak (bid, bytes) VALUES (?, ?) AS new - ON DUPLICATE KEY UPDATE bytes = new.bytes - `, currentBlockID[:], totalsBytes) - if err != nil { - return 0, modules.Target{}, modules.AddContext(err, "unable to store total time values") - } - return newTotalTime, newTotalTarget, nil -} - -// initOak will initialize all of the Oak difficulty adjustment related fields. -// This is separate from the initialization process for compatibility reasons - -// some databases will not have these fields at start, so it must be checked. -// -// After oak initialization is complete, a specific field is marked so that -// oak initialization can be skipped in the future. -func (cs *ConsensusSet) initOak(tx *sql.Tx) error { - // Check whether the init field is set. - var init bool - err := tx.QueryRow("SELECT init FROM cs_oak_init WHERE id = 1").Scan(&init) - if err != nil && !errors.Is(err, sql.ErrNoRows) { - return err - } - if init { - // The Oak fields have been initialized, nothing to do. - return nil - } - - // If the current height is greater than the hardfork trigger date, return - // an error and refuse to initialize. - height := blockHeight(tx) - if height > modules.OakHardforkBlock { - return errOakHardforkIncompatibility - } - - // Store base values for the genesis block. 
- totalTime, totalTarget, err := cs.storeBlockTotals(tx, 0, modules.GenesisID, 0, modules.GenesisTimestamp, modules.GenesisTimestamp, modules.RootDepth, modules.RootTarget) - if err != nil { - return modules.AddContext(err, "unable to store genesis block totals") - } - - // The Oak fields has not been initialized, scan through the consensus set - // and set the fields for each block. - parentTimestamp := modules.GenesisTimestamp - parentChildTarget := modules.RootTarget - for i := uint64(1); i <= height; i++ { // Skip Genesis block. - // Fetch the processed block for the current block. - id, err := getBlockAtHeight(tx, i) - if err != nil { - return modules.AddContext(err, "unable to find block at height") - } - pb, exists, err := findBlockByID(tx, id) - if err != nil { - return modules.AddContext(err, "unable to find block from id") - } - if !exists { - return errors.New("unable to find block from id") - } - - // Calculate and store the new block totals. - totalTime, totalTarget, err = cs.storeBlockTotals(tx, i, id, totalTime, parentTimestamp, pb.Block.Timestamp, totalTarget, parentChildTarget) - if err != nil { - return modules.AddContext(err, "unable to store updated block totals") - } - // Update the previous values. - parentTimestamp = pb.Block.Timestamp - parentChildTarget = pb.ChildTarget - } - - // Tag the initialization field, indicating that initialization has - // completed. - _, err = tx.Exec("REPLACE INTO cs_oak_init (id, init) VALUES (1, TRUE)") - - return err -} diff --git a/modules/consensus/diffs.go b/modules/consensus/diffs.go deleted file mode 100644 index 1573b7f..0000000 --- a/modules/consensus/diffs.go +++ /dev/null @@ -1,219 +0,0 @@ -package consensus - -import ( - "database/sql" - "errors" - - "github.com/mike76-dev/sia-satellite/modules" -) - -var ( - errInvalidSuccessor = errors.New("generating diffs for a block that's an invalid successsor to the current block") -) - -// commitSiacoinOutputDiff applies or reverts a SiacoinOutputDiff. 
-func commitSiacoinOutputDiff(tx *sql.Tx, scod modules.SiacoinOutputDiff, dir modules.DiffDirection) error { - if scod.Direction == dir { - return addSiacoinOutput(tx, scod.ID, scod.SiacoinOutput) - } else { - return removeSiacoinOutput(tx, scod.ID) - } -} - -// commitFileContractDiff applies or reverts a FileContractDiff. -func commitFileContractDiff(tx *sql.Tx, fcd modules.FileContractDiff, dir modules.DiffDirection) error { - if fcd.Direction == dir { - return addFileContract(tx, fcd.ID, fcd.FileContract) - } else { - return removeFileContract(tx, fcd.ID) - } -} - -// commitSiafundOutputDiff applies or reverts a Siafund output diff. -func commitSiafundOutputDiff(tx *sql.Tx, sfod modules.SiafundOutputDiff, dir modules.DiffDirection) error { - if sfod.Direction == dir { - return addSiafundOutput(tx, sfod.ID, sfod.SiafundOutput, sfod.ClaimStart) - } else { - return removeSiafundOutput(tx, sfod.ID) - } -} - -// commitDelayedSiacoinOutputDiff applies or reverts a delayedSiacoinOutputDiff. -func commitDelayedSiacoinOutputDiff(tx *sql.Tx, dscod modules.DelayedSiacoinOutputDiff, dir modules.DiffDirection) error { - if dscod.Direction == dir { - return addDSCO(tx, dscod.MaturityHeight, dscod.ID, dscod.SiacoinOutput) - } else { - return removeDSCO(tx, dscod.MaturityHeight, dscod.ID) - } -} - -// commitSiafundPoolDiff applies or reverts a SiafundPoolDiff. -func commitSiafundPoolDiff(tx *sql.Tx, sfpd modules.SiafundPoolDiff, dir modules.DiffDirection) error { - if dir == modules.DiffApply { - return setSiafundPool(tx, sfpd.Adjusted) - } else { - return setSiafundPool(tx, sfpd.Previous) - } -} - -// commitNodeDiffs commits all of the diffs in a block node. 
-func commitNodeDiffs(tx *sql.Tx, pb *processedBlock, dir modules.DiffDirection) (err error) { - if dir == modules.DiffApply { - for _, scod := range pb.SiacoinOutputDiffs { - if err := commitSiacoinOutputDiff(tx, scod, dir); err != nil { - return err - } - } - for _, fcd := range pb.FileContractDiffs { - if err := commitFileContractDiff(tx, fcd, dir); err != nil { - return err - } - } - for _, sfod := range pb.SiafundOutputDiffs { - if err := commitSiafundOutputDiff(tx, sfod, dir); err != nil { - return err - } - } - for _, dscod := range pb.DelayedSiacoinOutputDiffs { - if err := commitDelayedSiacoinOutputDiff(tx, dscod, dir); err != nil { - return err - } - } - for _, sfpd := range pb.SiafundPoolDiffs { - if err := commitSiafundPoolDiff(tx, sfpd, dir); err != nil { - return err - } - } - } else { - for i := len(pb.SiacoinOutputDiffs) - 1; i >= 0; i-- { - if err := commitSiacoinOutputDiff(tx, pb.SiacoinOutputDiffs[i], dir); err != nil { - return err - } - } - for i := len(pb.FileContractDiffs) - 1; i >= 0; i-- { - if err := commitFileContractDiff(tx, pb.FileContractDiffs[i], dir); err != nil { - return err - } - } - for i := len(pb.SiafundOutputDiffs) - 1; i >= 0; i-- { - if err := commitSiafundOutputDiff(tx, pb.SiafundOutputDiffs[i], dir); err != nil { - return err - } - } - for i := len(pb.DelayedSiacoinOutputDiffs) - 1; i >= 0; i-- { - if err := commitDelayedSiacoinOutputDiff(tx, pb.DelayedSiacoinOutputDiffs[i], dir); err != nil { - return err - } - } - for i := len(pb.SiafundPoolDiffs) - 1; i >= 0; i-- { - if err := commitSiafundPoolDiff(tx, pb.SiafundPoolDiffs[i], dir); err != nil { - return err - } - } - } - return nil -} - -// updateCurrentPath updates the current path after applying a diff set. -func updateCurrentPath(tx *sql.Tx, pb *processedBlock, dir modules.DiffDirection) error { - // Update the current path. 
- if dir == modules.DiffApply { - return pushPath(tx, pb.Block.ID()) - } else { - return popPath(tx) - } -} - -// commitFoundationUpdate updates the current Foundation unlock hashes in -// accordance with the specified block and direction. -// -// Because these updates do not have associated diffs, we cannot apply multiple -// updates per block. Instead, we apply the first update and ignore the rest. -func commitFoundationUpdate(tx *sql.Tx, pb *processedBlock, dir modules.DiffDirection) (err error) { - if dir == modules.DiffApply { - for i := range pb.Block.Transactions { - if err := applyArbitraryData(tx, pb, pb.Block.Transactions[i]); err != nil { - return err - } - } - } else { - // Look for a set of prior unlock hashes for this height. - primary, failsafe, exists, err := getPriorFoundationUnlockHashes(tx, pb.Height) - if err != nil { - return err - } - if exists { - if err := setFoundationUnlockHashes(tx, primary, failsafe); err != nil { - return err - } - if err := deletePriorFoundationUnlockHashes(tx, pb.Height); err != nil { - return err - } - if err := transferFoundationOutputs(tx, pb.Height, primary); err != nil { - return err - } - } - } - return nil -} - -// commitDiffSet applies or reverts the diffs in a blockNode. -func commitDiffSet(tx *sql.Tx, pb *processedBlock, dir modules.DiffDirection) (err error) { - if err := commitNodeDiffs(tx, pb, dir); err != nil { - return err - } - if err := commitFoundationUpdate(tx, pb, dir); err != nil { - return err - } - return updateCurrentPath(tx, pb, dir) -} - -// generateAndApplyDiff will verify the block and then integrate it into the -// consensus state. These two actions must happen at the same time because -// transactions are allowed to depend on each other. We can't be sure that a -// transaction is valid unless we have applied all of the previous transactions -// in the block, which means we need to apply while we verify. 
-func generateAndApplyDiff(tx *sql.Tx, pb *processedBlock) error { - // Sanity check - the block being applied should have the current block as - // a parent. - if pb.Block.ParentID != currentBlockID(tx) { - return errInvalidSuccessor - } - - // Validate and apply each transaction in the block. They cannot be - // validated all at once because some transactions may not be valid until - // previous transactions have been applied. - for _, txn := range pb.Block.Transactions { - if err := validTransaction(tx, txn); err != nil { - return err - } - if err := applyTransaction(tx, pb, txn); err != nil { - return err - } - } - - // After all of the transactions have been applied, 'maintenance' is - // applied on the block. This includes adding any outputs that have reached - // maturity, applying any contracts with missed storage proofs, and adding - // the miner payouts and Foundation subsidy to the list of delayed outputs. - if err := applyMaintenance(tx, pb); err != nil { - return err - } - - // DiffsGenerated are only set to true after the block has been fully - // validated and integrated. This is required to prevent later blocks from - // being accepted on top of an invalid block - if the consensus set ever - // forks over an invalid block, 'DiffsGenerated' will be set to 'false', - // requiring validation to occur again. when 'DiffsGenerated' is set to - // true, validation is skipped, therefore the flag should only be set to - // true on fully validated blocks. - pb.DiffsGenerated = true - - // Add the block to the current path and block map. 
- bid := pb.Block.ID() - if err := updateCurrentPath(tx, pb, modules.DiffApply); err != nil { - return err - } - - return saveBlock(tx, bid, pb) -} diff --git a/modules/consensus/fork.go b/modules/consensus/fork.go deleted file mode 100644 index 8d6123e..0000000 --- a/modules/consensus/fork.go +++ /dev/null @@ -1,116 +0,0 @@ -package consensus - -import ( - "database/sql" - "errors" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -var ( - errExternalRevert = errors.New("cannot revert to block outside of current path") -) - -// backtrackToCurrentPath traces backwards from 'pb' until it reaches a block -// in the ConsensusSet's current path (the "common parent"). It returns the -// (inclusive) set of blocks between the common parent and 'pb', starting from -// the former. -func backtrackToCurrentPath(tx *sql.Tx, pb *processedBlock) []*processedBlock { - path := []*processedBlock{pb} - for { - // Error is not checked in production code - an error can only indicate - // that pb.Height > blockHeight. - currentPathID, _ := getBlockAtHeight(tx, pb.Height) - if currentPathID == pb.Block.ID() { - break - } - - // Prepend the next block to the list of blocks leading from the - // current path to the input block. - pb, exists, err := findBlockByID(tx, pb.Block.ParentID) - if err != nil { - return nil - } - if !exists { - return nil - } - path = append([]*processedBlock{pb}, path...) - if (currentPathID == types.BlockID{}) { - break - } - } - return path -} - -// revertToBlock will revert blocks from the ConsensusSet's current path until -// 'pb' is the current block. Blocks are returned in the order that they were -// reverted. 'pb' is not reverted. -func (cs *ConsensusSet) revertToBlock(tx *sql.Tx, pb *processedBlock) (revertedBlocks []*processedBlock, err error) { - // Sanity check - make sure that pb is in the current path. 
- currentPathID, err := getBlockAtHeight(tx, pb.Height) - if err != nil || currentPathID != pb.Block.ID() { - return nil, errExternalRevert - } - - // Rewind blocks until 'pb' is the current block. - for currentBlockID(tx) != pb.Block.ID() { - block := currentProcessedBlock(tx) - if err := commitDiffSet(tx, block, modules.DiffRevert); err != nil { - return nil, err - } - revertedBlocks = append(revertedBlocks, block) - - // Sanity check - after removing a block, check that the consensus set - // has maintained consistency. - cs.maybeCheckConsistency(tx) - } - return revertedBlocks, nil -} - -// applyUntilBlock will successively apply the blocks between the consensus -// set's current path and 'pb'. -func (cs *ConsensusSet) applyUntilBlock(tx *sql.Tx, pb *processedBlock) (appliedBlocks []*processedBlock, err error) { - // Backtrack to the common parent of 'bn' and current path and then apply the new blocks. - newPath := backtrackToCurrentPath(tx, pb) - for _, block := range newPath[1:] { - // If the diffs for this block have already been generated, apply diffs - // directly instead of generating them. This is much faster. - if block.DiffsGenerated { - if err := commitDiffSet(tx, block, modules.DiffApply); err != nil { - return nil, err - } - } else { - err := generateAndApplyDiff(tx, block) - if err != nil { - // Mark the block as invalid. - addDoSBlock(tx, block.Block.ID()) - return nil, err - } - } - appliedBlocks = append(appliedBlocks, block) - - // Sanity check - after applying a block, check that the consensus set - // has maintained consistency. - cs.maybeCheckConsistency(tx) - } - return appliedBlocks, nil -} - -// forkBlockchain will move the consensus set onto the 'newBlock' fork. An -// error will be returned if any of the blocks applied in the transition are -// found to be invalid. forkBlockchain is atomic; the ConsensusSet is only -// updated if the function returns nil. 
-func (cs *ConsensusSet) forkBlockchain(tx *sql.Tx, newBlock *processedBlock) (revertedBlocks, appliedBlocks []*processedBlock, err error) { - commonParent := backtrackToCurrentPath(tx, newBlock)[0] - revertedBlocks, err = cs.revertToBlock(tx, commonParent) - if err != nil { - return nil, nil, err - } - appliedBlocks, err = cs.applyUntilBlock(tx, newBlock) - if err != nil { - return nil, nil, err - } - return revertedBlocks, appliedBlocks, nil -} diff --git a/modules/consensus/persist.go b/modules/consensus/persist.go deleted file mode 100644 index 5247648..0000000 --- a/modules/consensus/persist.go +++ /dev/null @@ -1,119 +0,0 @@ -package consensus - -import ( - "database/sql" - "errors" - "fmt" - "path/filepath" - - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" -) - -const ( - // logfile contains the filename of the consensus log. - logFile = "consensus.log" -) - -var ( - // errFoundationHardforkIncompatibility is returned if the consensus - // database was not upgraded prior to the Foundation hardfork height. - errFoundationHardforkIncompatibility = errors.New("cannot upgrade database for Foundation hardfork after activation height") -) - -// loadDB pulls all the blocks that have been saved to disk into memory, using -// them to fill out the ConsensusSet. -func (cs *ConsensusSet) loadDB() error { - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return err - } - - // Check if the database has been initialized. - err = cs.initDB(tx) - if err != nil { - tx.Rollback() - return err - } - - // Check the initialization of the oak difficulty adjustment fields, and - // create them if they do not exist. - err = cs.initOak(tx) - if err != nil { - tx.Rollback() - return err - } - - // Initialize the Foundation hardfork fields, if necessary. 
- err = initFoundation(tx) - if err != nil { - tx.Rollback() - return err - } - - // Check that the genesis block is correct - typically only incorrect - // in the event of developer binaries vs. release binaires. - genesisID, err := getBlockAtHeight(tx, 0) - if err != nil { - tx.Rollback() - return err - } - if genesisID != cs.blockRoot.Block.ID() { - tx.Rollback() - return errors.New("blockchain has wrong genesis block") - } - - return tx.Commit() -} - -// initFoundation initializes the database fields relating to the Foundation -// subsidy hardfork. If these fields have already been set, it does nothing. -func initFoundation(tx *sql.Tx) error { - var count int - err := tx.QueryRow("SELECT COUNT(*) FROM cs_fuh_current").Scan(&count) - if err != nil { - return err - } - if count > 0 { - // UnlockHashes have already been set; nothing to do. - return nil - } - // If the current height is greater than the hardfork trigger date, return - // an error and refuse to initialize. - height := blockHeight(tx) - if height >= modules.FoundationHardforkHeight { - return errFoundationHardforkIncompatibility - } - // Set the initial Foundation addresses. - err = setFoundationUnlockHashes(tx, modules.InitialFoundationUnlockHash, modules.InitialFoundationFailsafeUnlockHash) - - return err -} - -// initPersist initializes the persistence structures of the consensus set, in -// particular loading the database and preparing to manage subscribers. -func (cs *ConsensusSet) initPersist(dir string) error { - // Initialize the logger. - var err error - cs.log, err = persist.NewFileLogger(filepath.Join(dir, logFile)) - if err != nil { - return err - } - // Set up closing the logger. - cs.tg.AfterStop(func() { - err := cs.log.Close() - if err != nil { - // State of the logger is unknown, a println will suffice. - fmt.Println("Error shutting down consensus set logger:", err) - } - }) - - // Load the database. 
- err = cs.loadDB() - if err != nil { - return err - } - - return nil -} diff --git a/modules/consensus/processedblock.go b/modules/consensus/processedblock.go deleted file mode 100644 index 1124bce..0000000 --- a/modules/consensus/processedblock.go +++ /dev/null @@ -1,227 +0,0 @@ -package consensus - -import ( - "database/sql" - "math/big" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// SurpassThreshold is a percentage that dictates how much heavier a competing -// chain has to be before the node will switch to mining on that chain. This is -// not a consensus rule. This percentage is only applied to the most recent -// block, not the entire chain; see blockNode.heavierThan. -// -// If no threshold were in place, it would be possible to manipulate a block's -// timestamp to produce a sufficiently heavier block. -var SurpassThreshold = big.NewRat(20, 100) - -// processedBlock is a copy/rename of blockNode, with the pointers to -// other blockNodes replaced with block ID's, and all the fields -// exported, so that a block node can be marshalled. -type processedBlock struct { - Block types.Block - Height uint64 - Depth modules.Target - ChildTarget modules.Target - - DiffsGenerated bool - SiacoinOutputDiffs []modules.SiacoinOutputDiff - FileContractDiffs []modules.FileContractDiff - SiafundOutputDiffs []modules.SiafundOutputDiff - DelayedSiacoinOutputDiffs []modules.DelayedSiacoinOutputDiff - SiafundPoolDiffs []modules.SiafundPoolDiff - - ConsensusChecksum types.Hash256 -} - -// EncodeTo implements types.EncoderTo. 
-func (pb *processedBlock) EncodeTo(e *types.Encoder) { - pb.Block.EncodeTo(e) - e.WriteUint64(pb.Height) - e.Write(pb.Depth[:]) - e.Write(pb.ChildTarget[:]) - e.WriteBool(pb.DiffsGenerated) - e.WritePrefix(len(pb.SiacoinOutputDiffs)) - for _, sod := range pb.SiacoinOutputDiffs { - sod.EncodeTo(e) - } - e.WritePrefix(len(pb.FileContractDiffs)) - for _, fcd := range pb.FileContractDiffs { - fcd.EncodeTo(e) - } - e.WritePrefix(len(pb.SiafundOutputDiffs)) - for _, sfd := range pb.SiafundOutputDiffs { - sfd.EncodeTo(e) - } - e.WritePrefix(len(pb.DelayedSiacoinOutputDiffs)) - for _, dsod := range pb.DelayedSiacoinOutputDiffs { - dsod.EncodeTo(e) - } - e.WritePrefix(len(pb.SiafundPoolDiffs)) - for _, spd := range pb.SiafundPoolDiffs { - spd.EncodeTo(e) - } -} - -// DecodeFrom implements types.DecoderFrom. -func (pb *processedBlock) DecodeFrom(d *types.Decoder) { - pb.Block.DecodeFrom(d) - pb.Height = d.ReadUint64() - d.Read(pb.Depth[:]) - d.Read(pb.ChildTarget[:]) - pb.DiffsGenerated = d.ReadBool() - pb.SiacoinOutputDiffs = make([]modules.SiacoinOutputDiff, d.ReadPrefix()) - for i := 0; i < len(pb.SiacoinOutputDiffs); i++ { - pb.SiacoinOutputDiffs[i].DecodeFrom(d) - } - pb.FileContractDiffs = make([]modules.FileContractDiff, d.ReadPrefix()) - for i := 0; i < len(pb.FileContractDiffs); i++ { - pb.FileContractDiffs[i].DecodeFrom(d) - } - pb.SiafundOutputDiffs = make([]modules.SiafundOutputDiff, d.ReadPrefix()) - for i := 0; i < len(pb.SiafundOutputDiffs); i++ { - pb.SiafundOutputDiffs[i].DecodeFrom(d) - } - pb.DelayedSiacoinOutputDiffs = make([]modules.DelayedSiacoinOutputDiff, d.ReadPrefix()) - for i := 0; i < len(pb.DelayedSiacoinOutputDiffs); i++ { - pb.DelayedSiacoinOutputDiffs[i].DecodeFrom(d) - } - pb.SiafundPoolDiffs = make([]modules.SiafundPoolDiff, d.ReadPrefix()) - for i := 0; i < len(pb.SiafundPoolDiffs); i++ { - pb.SiafundPoolDiffs[i].DecodeFrom(d) - } -} - -// heavierThan returns true if the blockNode is sufficiently heavier than -// 'cmp'. 
'cmp' is expected to be the current block node. "Sufficient" means -// that the weight of 'bn' exceeds the weight of 'cmp' by: -// -// (the target of 'cmp' * 'Surpass Threshold') -func (pb *processedBlock) heavierThan(cmp *processedBlock) bool { - requirement := cmp.Depth.AddDifficulties(cmp.ChildTarget.MulDifficulty(SurpassThreshold)) - return requirement.Cmp(pb.Depth) > 0 // Inversed, because the smaller target is actually heavier. -} - -// childDepth returns the depth of a blockNode's child nodes. The depth is the -// "sum" of the current depth and current difficulty. See target.Add for more -// detailed information. -func (pb *processedBlock) childDepth() modules.Target { - return pb.Depth.AddDifficulties(pb.ChildTarget) -} - -// targetAdjustmentBase returns the magnitude that the target should be -// adjusted by before a clamp is applied. -func (cs *ConsensusSet) targetAdjustmentBase(tx *sql.Tx, pb *processedBlock) *big.Rat { - // Grab the block that was generated 'TargetWindow' blocks prior to the - // parent. If there are not 'TargetWindow' blocks yet, stop at the genesis - // block. - var windowSize uint64 - parentID := pb.Block.ParentID - currentID := pb.Block.ID() - var err error - for windowSize = 0; windowSize < modules.TargetWindow && parentID != (types.BlockID{}); windowSize++ { - currentID = parentID - parentID, _, err = getParentID(tx, parentID) - if err != nil { - cs.log.Println("ERROR: unable to find parent ID:", err) - return nil - } - } - - current, exists, err := findBlockByID(tx, currentID) - if err != nil || !exists { - cs.log.Println("ERROR: unable to find block:", err) - return nil - } - timestamp := current.Block.Timestamp - - // The target of a child is determined by the amount of time that has - // passed between the generation of its immediate parent and its - // TargetWindow'th parent. The expected amount of seconds to have passed is - // TargetWindow*BlockFrequency. The target is adjusted in proportion to how - // time has passed vs. 
the expected amount of time to have passed. - // - // The target is converted to a big.Rat to provide infinite precision - // during the calculation. The big.Rat is just the int representation of a - // target. - timePassed := pb.Block.Timestamp.Unix() - timestamp.Unix() - expectedTimePassed := modules.BlockFrequency * windowSize - return big.NewRat(int64(timePassed), int64(expectedTimePassed)) -} - -// clampTargetAdjustment returns a clamped version of the base adjustment -// value. The clamp keeps the maximum adjustment to ~7x every 2000 blocks. This -// ensures that raising and lowering the difficulty requires a minimum amount -// of total work, which prevents certain classes of difficulty adjusting -// attacks. -func clampTargetAdjustment(base *big.Rat) *big.Rat { - if base.Cmp(modules.MaxTargetAdjustmentUp) > 0 { - return modules.MaxTargetAdjustmentUp - } else if base.Cmp(modules.MaxTargetAdjustmentDown) < 0 { - return modules.MaxTargetAdjustmentDown - } - return base -} - -// setChildTarget computes the target of a blockNode's child. All children of a node -// have the same target. -func (cs *ConsensusSet) setChildTarget(tx *sql.Tx, pb *processedBlock) { - // Fetch the parent block. - parent, exists, err := findBlockByID(tx, pb.Block.ParentID) - if err != nil || !exists { - cs.log.Println("ERROR: unable to find block:", err) - return - } - - if pb.Height % (modules.TargetWindow / 2) != 0 { - pb.ChildTarget = parent.ChildTarget - return - } - - adjustment := clampTargetAdjustment(cs.targetAdjustmentBase(tx, pb)) - adjustedRatTarget := new(big.Rat).Mul(parent.ChildTarget.Rat(), adjustment) - pb.ChildTarget = modules.RatToTarget(adjustedRatTarget) -} - -// newChild creates a blockNode from a block and adds it to the parent's set of -// children. The new node is also returned. It necessarily modifies the database. -func (cs *ConsensusSet) newChild(tx *sql.Tx, pb *processedBlock, b types.Block) (*processedBlock, error) { - // Create the child node. 
- childID := b.ID() - child := &processedBlock{ - Block: b, - Height: pb.Height + 1, - Depth: pb.childDepth(), - } - - // Push the total values for this block into the oak difficulty adjustment - // bucket. The previous totals are required to compute the new totals. - prevTotalTime, prevTotalTarget, err := cs.getBlockTotals(tx, b.ParentID) - if err != nil { - cs.log.Println("ERROR: couldn't retrieve block totals:", err) - return nil, err - } - _, _, err = cs.storeBlockTotals(tx, child.Height, childID, prevTotalTime, pb.Block.Timestamp, b.Timestamp, prevTotalTarget, pb.ChildTarget) - if err != nil { - cs.log.Println("ERROR: couldn't save block totals:", err) - return nil, err - } - - // Use the difficulty adjustment algorithm to set the target of the child - // block and put the new processed block into the database. - if pb.Height < modules.OakHardforkBlock { - cs.setChildTarget(tx, child) - } else { - child.ChildTarget = cs.childTargetOak(prevTotalTime, prevTotalTarget, pb.ChildTarget, pb.Height, pb.Block.Timestamp) - } - err = saveBlock(tx, childID, child) - if err != nil { - cs.log.Println("ERROR: couldn't save new block:", err) - return nil, err - } - - return child, nil -} diff --git a/modules/consensus/subscribe.go b/modules/consensus/subscribe.go deleted file mode 100644 index 9d87c7a..0000000 --- a/modules/consensus/subscribe.go +++ /dev/null @@ -1,329 +0,0 @@ -package consensus - -import ( - "database/sql" - "errors" - - "github.com/mike76-dev/sia-satellite/internal/sync" - "github.com/mike76-dev/sia-satellite/modules" -) - -// computeConsensusChangeDiffs computes the ConsensusChangeDiffs for the -// provided block. 
-func computeConsensusChangeDiffs(pb *processedBlock, apply bool) modules.ConsensusChangeDiffs { - if apply { - return modules.ConsensusChangeDiffs{ - SiacoinOutputDiffs: pb.SiacoinOutputDiffs, - FileContractDiffs: pb.FileContractDiffs, - SiafundOutputDiffs: pb.SiafundOutputDiffs, - DelayedSiacoinOutputDiffs: pb.DelayedSiacoinOutputDiffs, - SiafundPoolDiffs: pb.SiafundPoolDiffs, - } - } - // The order of the diffs needs to be flipped and the direction of the - // diffs also needs to be flipped. - cd := modules.ConsensusChangeDiffs{ - SiacoinOutputDiffs: make([]modules.SiacoinOutputDiff, len(pb.SiacoinOutputDiffs)), - FileContractDiffs: make([]modules.FileContractDiff, len(pb.FileContractDiffs)), - SiafundOutputDiffs: make([]modules.SiafundOutputDiff, len(pb.SiafundOutputDiffs)), - DelayedSiacoinOutputDiffs: make([]modules.DelayedSiacoinOutputDiff, len(pb.DelayedSiacoinOutputDiffs)), - SiafundPoolDiffs: make([]modules.SiafundPoolDiff, len(pb.SiafundPoolDiffs)), - } - for i, d := range pb.SiacoinOutputDiffs { - d.Direction = !d.Direction - cd.SiacoinOutputDiffs[len(cd.SiacoinOutputDiffs) - i - 1] = d - } - for i, d := range pb.FileContractDiffs { - d.Direction = !d.Direction - cd.FileContractDiffs[len(cd.FileContractDiffs) - i - 1] = d - } - for i, d := range pb.SiafundOutputDiffs { - d.Direction = !d.Direction - cd.SiafundOutputDiffs[len(cd.SiafundOutputDiffs) - i - 1] = d - } - for i, d := range pb.DelayedSiacoinOutputDiffs { - d.Direction = !d.Direction - cd.DelayedSiacoinOutputDiffs[len(cd.DelayedSiacoinOutputDiffs) - i - 1] = d - } - for i, d := range pb.SiafundPoolDiffs { - d.Direction = !d.Direction - cd.SiafundPoolDiffs[len(cd.SiafundPoolDiffs) - i - 1] = d - } - return cd -} - -// computeConsensusChange computes the consensus change from the change entry -// at index 'i' in the change log. If i is out of bounds, an error is returned. 
-func (cs *ConsensusSet) computeConsensusChange(tx *sql.Tx, ce changeEntry) (modules.ConsensusChange, error) { - cc := modules.ConsensusChange{ - ID: ce.ID(), - } - for _, revertedBlockID := range ce.RevertedBlocks { - revertedBlock, exists, err := findBlockByID(tx, revertedBlockID) - if err != nil || !exists { - cs.log.Println("CRITICAL: unable to find block in computeConsensusChange:", err) - return modules.ConsensusChange{}, err - } - cc.RevertedBlocks = append(cc.RevertedBlocks, revertedBlock.Block) - diffs := computeConsensusChangeDiffs(revertedBlock, false) - cc.RevertedDiffs = append(cc.RevertedDiffs, diffs) - cc.AppendDiffs(diffs) - } - for _, appliedBlockID := range ce.AppliedBlocks { - appliedBlock, exists, err := findBlockByID(tx, appliedBlockID) - if err != nil || !exists { - cs.log.Println("CRITICAL: unable to find block in computeConsensusChange:", err) - return modules.ConsensusChange{}, err - } - cc.AppliedBlocks = append(cc.AppliedBlocks, appliedBlock.Block) - diffs := computeConsensusChangeDiffs(appliedBlock, true) - cc.AppliedDiffs = append(cc.AppliedDiffs, diffs) - cc.AppendDiffs(diffs) - } - - // Grab the child target and the minimum valid child timestamp. - recentBlock := ce.AppliedBlocks[len(ce.AppliedBlocks) - 1] - pb, exists, err := findBlockByID(tx, recentBlock) - if err != nil || !exists { - cs.log.Println("CRITICAL: could not find process block for known block:", err) - return modules.ConsensusChange{}, err - } - - cc.ChildTarget = pb.ChildTarget - cc.MinimumValidChildTimestamp = cs.minimumValidChildTimestamp(tx, pb) - cc.BlockHeight = pb.Height - - currentBlock := currentBlockID(tx) - if cs.synced && recentBlock == currentBlock { - cc.Synced = true - } - - // Add the unexported tryTransactionSet function. - cc.TryTransactionSet = cs.tryTransactionSet - - return cc, nil -} - -// updateSubscribers will inform all subscribers of a new update to the -// consensus set. 
updateSubscribers does not alter the changelog, the changelog -// must be updated beforehand. -func (cs *ConsensusSet) updateSubscribers(ce changeEntry) { - if len(cs.subscribers) == 0 { - return - } - - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return - } - - // Compute the consensus change so it can be sent to subscribers. - cc, err := cs.computeConsensusChange(tx, ce) - if err != nil { - cs.log.Println("CRITICAL: computeConsensusChange failed:", err) - tx.Rollback() - return - } - - // Log re-orgs. - tx.Commit() - if len(cc.RevertedBlocks) > 0 { - cs.log.Println("INFO: ConsensusChange with re-org detected: ", cc.ID, len(cc.RevertedBlocks)) - } - - // Send the consensus change to all subscribers. - for _, subscriber := range cs.subscribers { - subscriber.ProcessConsensusChange(cc) - } -} - -// managedInitializeSubscribe will take a subscriber and feed them all of the -// consensus changes that have occurred since the change provided. -// -// As a special case, using an empty id as the start will have all the changes -// sent to the modules starting with the genesis block. -func (cs *ConsensusSet) managedInitializeSubscribe(subscriber modules.ConsensusSetSubscriber, start modules.ConsensusChangeID, cancel <-chan struct{}) (modules.ConsensusChangeID, error) { - if start == modules.ConsensusChangeRecent { - cs.mu.RLock() - defer cs.mu.RUnlock() - return cs.recentConsensusChangeID() - } - - // 'exists' and 'entry' are going to be pointed to the first entry that - // has not yet been seen by subscriber. - var exists bool - var entry changeEntry - cs.mu.RLock() - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return modules.ConsensusChangeID{}, err - } - if start == modules.ConsensusChangeBeginning { - // Special case: for modules.ConsensusChangeBeginning, create an - // initial node pointing to the genesis block. 
The subscriber will - // receive the diffs for all blocks in the consensus set, including - // the genesis block. - entry = cs.genesisEntry() - exists = true - } else { - // The subscriber has provided an existing consensus change. - // Because the subscriber already has this consensus change, - // 'entry' and 'exists' need to be pointed at the next consensus - // change. - entry, exists = getEntry(tx, start) - if !exists { - // modules.ErrInvalidConsensusChangeID is a named error that - // signals a break in synchronization between the consensus set - // persistence and the subscriber persistence. Typically, - // receiving this error means that the subscriber needs to - // perform a rescan of the consensus set. - cs.mu.RUnlock() - tx.Rollback() - return modules.ConsensusChangeID{}, modules.ErrInvalidConsensusChangeID - } - entry, exists = entry.NextEntry(tx) - } - cs.mu.RUnlock() - - // Nothing to do if the changeEntry doesn't exist. - if !exists { - tx.Rollback() - return start, nil - } - - // Send all remaining consensus changes to the subscriber. - latestChangeID := entry.ID() - for exists { - // Send changes in batches of 100 so that we don't hold the - // lock for too long. - cs.mu.RLock() - for i := 0; i < 100 && exists; i++ { - latestChangeID = entry.ID() - select { - case <-cancel: - tx.Rollback() - return modules.ConsensusChangeID{}, sync.ErrStopped - default: - } - cc, err := cs.computeConsensusChange(tx, entry) - if err != nil { - cs.mu.RUnlock() - tx.Rollback() - return modules.ConsensusChangeID{}, err - } - subscriber.ProcessConsensusChange(cc) - entry, exists = entry.NextEntry(tx) - } - cs.mu.RUnlock() - } - - tx.Commit() - return latestChangeID, nil -} - -// recentConsensusChangeID gets the ConsensusChangeID of the most recent -// change. 
-func (cs *ConsensusSet) recentConsensusChangeID() (cid modules.ConsensusChangeID, err error) { - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return modules.ConsensusChangeID{}, err - } - - id := make([]byte, 32) - err = tx.QueryRow("SELECT bytes FROM cs_changelog WHERE id = 1").Scan(&id) - if err != nil { - tx.Rollback() - return modules.ConsensusChangeID{}, err - } - - tx.Commit() - copy(cid[:], id[:]) - return -} - -// ConsensusSetSubscribe adds a subscriber to the list of subscribers, and -// gives them every consensus change that has occurred since the change with -// the provided id. -// -// As a special case, using an empty id as the start will have all the changes -// sent to the modules starting with the genesis block. -func (cs *ConsensusSet) ConsensusSetSubscribe(subscriber modules.ConsensusSetSubscriber, start modules.ConsensusChangeID, - cancel <-chan struct{}) error { - err := cs.tg.Add() - if err != nil { - return err - } - defer cs.tg.Done() - - // Call managedInitializeSubscribe until the new module is up-to-date. - for { - start, err = cs.managedInitializeSubscribe(subscriber, start, cancel) - if err != nil { - return err - } - - // Check if the start equals the most recent change id. If it does we - // are done. If it doesn't, we need to call managedInitializeSubscribe - // again. - cs.mu.Lock() - recentID, err := cs.recentConsensusChangeID() - if err != nil { - cs.mu.Unlock() - return err - } - if start == recentID { - // break out of the loop while still holding to lock to avoid - // updating subscribers before the new module is appended to the - // list of subscribers. - defer cs.mu.Unlock() - break - } - cs.mu.Unlock() - - // Check for shutdown. - select { - case <-cs.tg.StopChan(): - return sync.ErrStopped - default: - } - } - - // Add the module to the list of subscribers. - // Sanity check - subscriber should not be already subscribed. 
- for _, s := range cs.subscribers { - if s == subscriber { - cs.log.Println("CRITICAL: refusing to double-subscribe subscriber") - return errors.New("subscriber already registered") - } - } - cs.subscribers = append(cs.subscribers, subscriber) - return nil -} - -// Unsubscribe removes a subscriber from the list of subscribers, allowing for -// garbage collection and rescanning. If the subscriber is not found in the -// subscriber database, no action is taken. -func (cs *ConsensusSet) Unsubscribe(subscriber modules.ConsensusSetSubscriber) { - if cs.tg.Add() != nil { - return - } - defer cs.tg.Done() - cs.mu.Lock() - defer cs.mu.Unlock() - - // Search for the subscriber in the list of subscribers and remove it if - // found. - for i := range cs.subscribers { - if cs.subscribers[i] == subscriber { - // nil the subscriber entry (otherwise it will not be GC'd if it's - // at the end of the subscribers slice). - cs.subscribers[i] = nil - // Delete the entry from the slice. - cs.subscribers = append(cs.subscribers[0:i], cs.subscribers[i + 1:]...) - break - } - } -} diff --git a/modules/consensus/synchronize.go b/modules/consensus/synchronize.go deleted file mode 100644 index 23c79d2..0000000 --- a/modules/consensus/synchronize.go +++ /dev/null @@ -1,707 +0,0 @@ -package consensus - -import ( - "bytes" - "database/sql" - "encoding/binary" - "errors" - "io" - "net" - "sync" - "time" - - siasync "github.com/mike76-dev/sia-satellite/internal/sync" - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -const ( - // minNumOutbound is the minimum number of outbound peers required before ibd - // is confident we are synced. 
- minNumOutbound = 5 -) - -var ( - errNilProcBlock = errors.New("nil processed block was fetched from the database") - errSendBlocksStalled = errors.New("SendBlocks RPC timed and never received any blocks") - - // ibdLoopDelay is the time that managedInitialBlockchainDownload waits - // between attempts to synchronize with the network if the last attempt - // failed. - ibdLoopDelay = 10 * time.Second - - // MaxCatchUpBlocks is the maxiumum number of blocks that can be given to - // the consensus set in a single iteration during the initial blockchain - // download. - MaxCatchUpBlocks = uint64(10) - - // minIBDWaitTime is the time managedInitialBlockchainDownload waits before - // exiting if there are >= 1 and <= minNumOutbound peers synced. This timeout - // will primarily affect miners who have multiple nodes daisy chained off each - // other. Those nodes will likely have to wait minIBDWaitTime on every startup - // before IBD is done. - minIBDWaitTime = 90 * time.Minute - - // relayHeaderTimeout is the timeout for the RelayHeader RPC. - relayHeaderTimeout = 60 * time.Second - - // sendBlkTimeout is the timeout for the SendBlk RPC. - sendBlkTimeout = 90 * time.Second - - // sendBlocksTimeout is the timeout for the SendBlocks RPC. - sendBlocksTimeout = 180 * time.Second -) - -// isTimeoutErr is a helper function that returns true if err was caused by a -// network timeout. -func isTimeoutErr(err error) bool { - if err == nil { - return false - } - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - return true - } - return (err.Error() == "Read timeout" || err.Error() == "Write timeout") -} - -// blockHistory returns up to 32 block ids, starting with recent blocks and -// then proving exponentially increasingly less recent blocks. The genesis -// block is always included as the last block. 
This block history can be used -// to find a common parent that is reasonably recent, usually the most recent -// common parent is found, but always a common parent within a factor of 2 is -// found. -func blockHistory(tx *sql.Tx) (blockIDs [32]types.BlockID) { - height := blockHeight(tx) - step := uint64(1) - // The final step is to include the genesis block, which is why the final - // element is skipped during iteration. - for i := 0; i < 31; i++ { - // Include the next block. - blockID, err := getBlockAtHeight(tx, height) - if err != nil { - return - } - blockIDs[i] = blockID - - // Determine the height of the next block to include and then increase - // the step size. The height must be decreased first to prevent - // underflow. - // - // `i >= 9` means that the first 10 blocks will be included, and then - // skipping will start. - if i >= 9 { - step *= 2 - } - if height <= step { - break - } - height -= step - } - // Include the genesis block as the last element. - blockID, err := getBlockAtHeight(tx, 0) - if err != nil { - return - } - blockIDs[31] = blockID - return blockIDs -} - -// managedReceiveBlocks is the calling end of the SendBlocks RPC, without the -// threadgroup wrapping. -func (cs *ConsensusSet) managedReceiveBlocks(conn modules.PeerConn) (returnErr error) { - // Set a deadline after which SendBlocks will timeout. During IBD, especially, - // SendBlocks will timeout. This is by design so that IBD switches peers to - // prevent any one peer from stalling IBD. - err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout)) - if err != nil { - return err - } - finishedChan := make(chan struct{}) - defer close(finishedChan) - go func() { - select { - case <-cs.tg.StopChan(): - case <-finishedChan: - } - conn.Close() - }() - - // Check whether this RPC has timed out with the remote peer at the end of - // the fuction, and if so, return a custom error to signal that a new peer - // needs to be chosen. 
- stalled := true - defer func() { - if isTimeoutErr(returnErr) && stalled { - returnErr = errSendBlocksStalled - } - }() - - // Get blockIDs to send. - cs.mu.RLock() - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return err - } - history := blockHistory(tx) - tx.Commit() - cs.mu.RUnlock() - - // Send the block ids. - e := types.NewEncoder(conn) - e.WriteUint64(32 * 32) - for _, bid := range history { - bid.EncodeTo(e) - } - e.Flush() - - // Broadcast the last block accepted. This functionality is in a defer to - // ensure that a block is always broadcast if any blocks are accepted. This - // is to stop an attacker from preventing block broadcasts. - chainExtended := false - defer func() { - cs.mu.RLock() - synced := cs.synced - cs.mu.RUnlock() - if synced && chainExtended { - fullBlock := cs.managedCurrentBlock() - go cs.gateway.Broadcast("RelayHeader", fullBlock.Header(), cs.gateway.Peers()) - } - }() - - // Read blocks off of the wire and add them to the consensus set until - // there are no more blocks available. - moreAvailable := true - for moreAvailable { - // Read a slice of blocks from the wire. - d := types.NewDecoder(io.LimitedReader{R: conn, N: int64(MaxCatchUpBlocks*modules.BlockSizeLimit) + 17}) - _ = d.ReadUint64() - num := d.ReadPrefix() - newBlocks := make([]types.Block, num) - for i := 0; i < num; i++ { - newBlocks[i].DecodeFrom(d) - } - if err := d.Err(); err != nil { - return err - } - _ = d.ReadUint64() - moreAvailable = d.ReadBool() - if err := d.Err(); err != nil { - return err - } - if len(newBlocks) == 0 { - continue - } - stalled = false - - // Call managedAcceptBlock instead of AcceptBlock so as not to broadcast - // every block. - extended, acceptErr := cs.managedAcceptBlocks(newBlocks) - if extended { - chainExtended = true - } - // ErrNonExtendingBlock must be ignored until headers-first block - // sharing is implemented, block already in database should also be - // ignored. 
- if acceptErr != nil && !modules.ContainsError(acceptErr, modules.ErrNonExtendingBlock) && !modules.ContainsError(acceptErr, modules.ErrBlockKnown) { - return acceptErr - } - } - return nil -} - -// threadedReceiveBlocks is the calling end of the SendBlocks RPC. -func (cs *ConsensusSet) threadedReceiveBlocks(conn modules.PeerConn) error { - err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout)) - if err != nil { - return err - } - finishedChan := make(chan struct{}) - defer close(finishedChan) - go func() { - select { - case <-cs.tg.StopChan(): - case <-finishedChan: - } - conn.Close() - }() - err = cs.tg.Add() - if err != nil { - return err - } - defer cs.tg.Done() - return cs.managedReceiveBlocks(conn) -} - -// rpcSendBlocks is the receiving end of the SendBlocks RPC. It returns a -// sequential set of blocks based on the 32 input block IDs. The most recent -// known ID is used as the starting point, and up to 'MaxCatchUpBlocks' from -// that BlockHeight onwards are returned. It also sends a boolean indicating -// whether more blocks are available. -func (cs *ConsensusSet) rpcSendBlocks(conn modules.PeerConn) error { - err := conn.SetDeadline(time.Now().Add(sendBlocksTimeout)) - if err != nil { - return err - } - finishedChan := make(chan struct{}) - defer close(finishedChan) - go func() { - select { - case <-cs.tg.StopChan(): - case <-finishedChan: - } - conn.Close() - }() - err = cs.tg.Add() - if err != nil { - return err - } - defer cs.tg.Done() - - // Read a list of blocks known to the requester and find the most recent - // block from the current path. - var knownBlocks [32]types.BlockID - d := types.NewDecoder(io.LimitedReader{R: conn, N: 32*32 + 8}) - _ = d.ReadUint64() - for i := 0; i < 32; i++ { - knownBlocks[i].DecodeFrom(d) - } - if err := d.Err(); err != nil { - return err - } - - // Find the most recent block from knownBlocks in the current path. 
- found := false - var start uint64 - cs.mu.RLock() - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return err - } - err = func(tx *sql.Tx) error { - csHeight := blockHeight(tx) - for _, id := range knownBlocks { - pb, exists, err := findBlockByID(tx, id) - if err != nil || !exists { - continue - } - pathID, err := getBlockAtHeight(tx, pb.Height) - if err != nil { - continue - } - if pathID != pb.Block.ID() { - continue - } - if pb.Height == csHeight { - break - } - found = true - // Start from the child of the common block. - start = pb.Height + 1 - break - } - return nil - }(tx) - tx.Commit() - cs.mu.RUnlock() - if err != nil { - return err - } - - // If no matching blocks are found, or if the caller has all known blocks, - // don't send any blocks. - e := types.NewEncoder(conn) - if !found { - // Send 0 blocks. - e.WriteUint64(0) - e.WriteUint64(0) - // Indicate that no more blocks are available. - e.WriteUint64(1) - e.WriteBool(false) - e.Flush() - return nil - } - - // Send the caller all of the blocks that they are missing. - moreAvailable := true - for moreAvailable { - // Get the set of blocks to send. 
- var blocks []types.Block - cs.mu.RLock() - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return err - } - err = func(tx *sql.Tx) error { - height := blockHeight(tx) - for i := start; i <= height && i < start+MaxCatchUpBlocks; i++ { - id, err := getBlockAtHeight(tx, i) - if err != nil { - cs.log.Printf("CRITICAL: unable to get path: height %v :: request %v\n", height, i) - return err - } - pb, exists, err := findBlockByID(tx, id) - if err != nil { - cs.log.Printf("CRITICAL: unable to get block from block map: height %v :: request %v :: id %s\n", height, i, id) - return err - } - if !exists { - cs.log.Printf("WARN: findBlockByID yielded 'nil' block: %v :: request %v :: id %s\n", height, i, id) - return errNilProcBlock - } - blocks = append(blocks, pb.Block) - } - moreAvailable = start+MaxCatchUpBlocks <= height - start += MaxCatchUpBlocks - return nil - }(tx) - tx.Commit() - cs.mu.RUnlock() - if err != nil { - return err - } - - // Send a set of blocks to the caller + a flag indicating whether more - // are available. - var buf bytes.Buffer - e := types.NewEncoder(&buf) - e.WriteUint64(0) - e.WritePrefix(len(blocks)) - for _, block := range blocks { - block.EncodeTo(e) - } - e.Flush() - b := buf.Bytes() - binary.LittleEndian.PutUint64(b[:8], uint64(len(b)-8)) - _, err = conn.Write(b) - if err != nil { - return err - } - buf.Reset() - e.WritePrefix(1) - e.WriteBool(moreAvailable) - e.Flush() - b = buf.Bytes() - _, err = conn.Write(b) - if err != nil { - return err - } - } - - return nil -} - -// threadedRPCRelayHeader is an RPC that accepts a block header from a peer. 
-func (cs *ConsensusSet) threadedRPCRelayHeader(conn modules.PeerConn) error { - err := conn.SetDeadline(time.Now().Add(relayHeaderTimeout)) - if err != nil { - return err - } - finishedChan := make(chan struct{}) - defer close(finishedChan) - go func() { - select { - case <-cs.tg.StopChan(): - case <-finishedChan: - } - conn.Close() - }() - err = cs.tg.Add() - if err != nil { - return err - } - wg := new(sync.WaitGroup) - defer func() { - go func() { - wg.Wait() - cs.tg.Done() - }() - }() - - // Decode the block header from the connection. - var h types.BlockHeader - d := types.NewDecoder(io.LimitedReader{R: conn, N: 88}) - _ = d.ReadUint64() - h.DecodeFrom(d) - if err := d.Err(); err != nil { - return err - } - - // Do some relatively inexpensive checks to validate the header. - cs.mu.RLock() - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return err - } - err = func(tx *sql.Tx) error { - return cs.validateHeader(tx, h) - }(tx) - tx.Commit() - cs.mu.RUnlock() - - // WARN: orphan multithreading logic (dangerous areas, see below). - // - // If the header is valid and extends the heaviest chain, fetch the - // corresponding block. Call needs to be made in a separate goroutine - // because an exported call to the gateway is used, which is a deadlock - // risk given that rpcRelayHeader is called from the gateway. - // - // NOTE: In general this is bad design. Rather than recycling other - // calls, the whole protocol should have been kept in a single RPC. - // Because it is not, we have to do weird threading to prevent - // deadlocks, and we also have to be concerned every time the code in - // managedReceiveBlock is adjusted. - if modules.ContainsError(err, errOrphan) { // WARN: orphan multithreading logic case #1. 
- wg.Add(1) - go func() { - defer wg.Done() - err := cs.gateway.RPC(conn.RPCAddr(), "SendBlocks", cs.managedReceiveBlocks) - if err != nil { - cs.log.Println("WARN: failed to get parents of orphan header:", err) - } - }() - return nil - } else if err != nil { - return err - } - - // WARN: orphan multithreading logic case #2. - wg.Add(1) - go func() { - defer wg.Done() - err = cs.gateway.RPC(conn.RPCAddr(), "SendBlk", cs.managedReceiveBlock(h.ID())) - if err != nil { - cs.log.Println("WARN: failed to get header's corresponding block:", err) - } - }() - return nil -} - -// rpcSendBlk is an RPC that sends the requested block to the requesting peer. -func (cs *ConsensusSet) rpcSendBlk(conn modules.PeerConn) error { - err := conn.SetDeadline(time.Now().Add(sendBlkTimeout)) - if err != nil { - return err - } - finishedChan := make(chan struct{}) - defer close(finishedChan) - go func() { - select { - case <-cs.tg.StopChan(): - case <-finishedChan: - } - conn.Close() - }() - err = cs.tg.Add() - if err != nil { - return err - } - defer cs.tg.Done() - - // Decode the block id from the connection. - var id types.BlockID - d := types.NewDecoder(io.LimitedReader{R: conn, N: 40}) - _ = d.ReadUint64() - id.DecodeFrom(d) - if err := d.Err(); err != nil { - return err - } - - // Lookup the corresponding block. - var b types.Block - cs.mu.RLock() - tx, err := cs.db.Begin() - if err != nil { - cs.log.Println("ERROR: unable to start transaction:", err) - return err - } - err = func(tx *sql.Tx) error { - pb, exists, err := findBlockByID(tx, id) - if err != nil { - return err - } - if !exists { - return errors.New("block not found") - } - b = pb.Block - return nil - }(tx) - tx.Commit() - cs.mu.RUnlock() - if err != nil { - return err - } - - // Encode and send the block to the caller. 
- var buf bytes.Buffer - e := types.NewEncoder(&buf) - e.WriteUint64(0) - b.EncodeTo(e) - e.Flush() - bb := buf.Bytes() - binary.LittleEndian.PutUint64(bb[:8], uint64(len(bb)-8)) - _, err = conn.Write(bb) - - return err -} - -// managedReceiveBlock takes a block id and returns an RPCFunc that requests that -// block and then calls AcceptBlock on it. The returned function should be used -// as the calling end of the SendBlk RPC. -func (cs *ConsensusSet) managedReceiveBlock(id types.BlockID) modules.RPCFunc { - return func(conn modules.PeerConn) error { - e := types.NewEncoder(conn) - e.WritePrefix(32) - id.EncodeTo(e) - e.Flush() - - var block types.Block - d := types.NewDecoder(io.LimitedReader{R: conn, N: int64(modules.BlockSizeLimit) + 8}) - _ = d.ReadUint64() - block.DecodeFrom(d) - if err := d.Err(); err != nil { - return err - } - - chainExtended, err := cs.managedAcceptBlocks([]types.Block{block}) - if chainExtended { - cs.managedBroadcastBlock(block) - } - if err != nil && !errors.Is(err, modules.ErrNonExtendingBlock) { - return err - } - return nil - } -} - -// managedInitialBlockchainDownload performs the IBD on outbound peers. Blocks -// are downloaded from one peer at a time in 5 minute intervals, so as to -// prevent any one peer from significantly slowing down IBD. -// -// NOTE: IBD will succeed right now when each peer has a different blockchain. -// The height and the block id of the remote peers' current blocks are not -// checked to be the same. This can cause issues if you are connected to -// outbound peers <= v0.5.1 that are stalled in IBD. -func (cs *ConsensusSet) managedInitialBlockchainDownload() error { - // The consensus set will not recognize IBD as complete until it has enough - // peers. After the deadline though, it will recognize the blockchain - // download as complete even with only one peer. 
This deadline is helpful - // to local-net setups, where a machine will frequently only have one peer - // (and that peer will be another machine on the same local network, but - // within the local network at least one peer is connected to the broad - // network). - deadline := time.Now().Add(minIBDWaitTime) - numOutboundSynced := 0 - numOutboundNotSynced := 0 - for { - numOutboundSynced = 0 - numOutboundNotSynced = 0 - for _, p := range cs.gateway.Peers() { - // We only sync on outbound peers at first to make IBD less susceptible to - // fast-mining and other attacks, as outbound peers are more difficult to - // manipulate. - if p.Inbound { - continue - } - - // Put the rest of the iteration inside of a thread group. - err := func() error { - err := cs.tg.Add() - if err != nil { - return err - } - defer cs.tg.Done() - - // Request blocks from the peer. The error returned will only be - // 'nil' if there are no more blocks to receive. - err = cs.gateway.RPC(p.NetAddress, "SendBlocks", cs.managedReceiveBlocks) - if err == nil { - numOutboundSynced++ - // In this case, 'return nil' is equivalent to skipping to - // the next iteration of the loop. - return nil - } - numOutboundNotSynced++ - if !isTimeoutErr(err) { - cs.log.Printf("WARN: disconnecting from peer %v because IBD failed: %v", p.NetAddress, err) - // Disconnect if there is an unexpected error (not a timeout). This - // includes errSendBlocksStalled. - // - // We disconnect so that these peers are removed from gateway.Peers() and - // do not prevent us from marking ourselves as fully synced. - err := cs.gateway.Disconnect(p.NetAddress) - if err != nil { - cs.log.Printf("WARN: disconnecting from peer %v failed: %v", p.NetAddress, err) - } - } - return nil - }() - if err != nil { - return err - } - } - - // The consensus set is not considered synced until a majority of - // outbound peers say that we are synced. 
If less than 10 minutes have - // passed, a minimum of 'minNumOutbound' peers must say that we are - // synced, otherwise a 1 vs 0 majority is sufficient. - // - // This scheme is used to prevent malicious peers from being able to - // barricade the sync'd status of the consensus set, and to make sure - // that consensus sets behind a firewall with only one peer - // (potentially a local peer) are still able to eventually conclude - // that they have syncrhonized. Miners and hosts will often have setups - // beind a firewall where there is a single node with many peers and - // then the rest of the nodes only have a few peers. - if numOutboundSynced > numOutboundNotSynced && (numOutboundSynced >= minNumOutbound || time.Now().After(deadline)) { - break - } else { - // Sleep so we don't hammer the network with SendBlock requests. - if !cs.managedSleep(ibdLoopDelay) { - return siasync.ErrStopped - } - } - } - - cs.log.Printf("INFO: IBD done, synced with %v peers", numOutboundSynced) - return nil -} - -// managedSleep will sleep for the provided duration. -func (cs *ConsensusSet) managedSleep(d time.Duration) bool { - // Do a quick check whether the thread group is already stopped. - select { - case <-cs.tg.StopCtx().Done(): - return false - default: - } - - t := time.NewTimer(d) - select { - case <-t.C: - return true - case <-cs.tg.StopCtx().Done(): - } - - // tg has been stopped, clean up the timer and return false. - if !t.Stop() { - <-t.C - } - return false -} - -// Synced returns true if the consensus set is synced with the network. 
-func (cs *ConsensusSet) Synced() bool { - err := cs.tg.Add() - if err != nil { - return false - } - defer cs.tg.Done() - cs.mu.RLock() - defer cs.mu.RUnlock() - return cs.synced -} diff --git a/modules/consensus/validtransaction.go b/modules/consensus/validtransaction.go deleted file mode 100644 index 694c962..0000000 --- a/modules/consensus/validtransaction.go +++ /dev/null @@ -1,378 +0,0 @@ -package consensus - -import ( - "bytes" - "database/sql" - "errors" - "math/big" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -var ( - errAlteredRevisionPayouts = errors.New("file contract revision has altered payout volume") - errInvalidStorageProof = errors.New("provided storage proof is invalid") - errLateRevision = errors.New("file contract revision submitted after deadline") - errLowRevisionNumber = errors.New("transaction has a file contract with an outdated revision number") - errMissingSiacoinOutput = errors.New("transaction spends a nonexisting Siacoin output") - errSiacoinInputOutputMismatch = errors.New("Siacoin inputs do not equal Siacoin outputs for transaction") - errSiafundInputOutputMismatch = errors.New("Siafund inputs do not equal Siafund outputs for transaction") - errUnfinishedFileContract = errors.New("file contract window has not yet openend") - errUnrecognizedFileContractID = errors.New("cannot fetch storage proof segment for unknown file contract") - errWrongUnlockConditions = errors.New("transaction contains incorrect unlock conditions") - errUnsignedFoundationUpdate = errors.New("transaction contains an Foundation UnlockHash update with missing or invalid signatures") -) - -// validSiacoins checks that the siacoin inputs and outputs are valid in the -// context of the current consensus set. -func validSiacoins(tx *sql.Tx, t types.Transaction) error { - var inputSum types.Currency - for _, sci := range t.SiacoinInputs { - // Check that the input spends an existing output. 
- sco, exists, err := findSiacoinOutput(tx, sci.ParentID) - if err != nil { - return err - } - if !exists { - return errMissingSiacoinOutput - } - - // Check that the unlock conditions match the required unlock hash. - if sci.UnlockConditions.UnlockHash() != sco.Address { - return errWrongUnlockConditions - } - - inputSum = inputSum.Add(sco.Value) - } - if !inputSum.Equals(modules.SiacoinOutputSum(t)) { - return errSiacoinInputOutputMismatch - } - return nil -} - -// storageProofSegment returns the index of the segment that needs to be proven -// exists in a file contract. -func storageProofSegment(tx *sql.Tx, fcid types.FileContractID) (uint64, error) { - // Check that the parent file contract exists. - fc, exists, err := findFileContract(tx, fcid) - if err != nil { - return 0, err - } - if !exists { - return 0, errUnrecognizedFileContractID - } - - // Get the trigger block id. - triggerHeight := fc.WindowStart - 1 - if triggerHeight > blockHeight(tx) { - return 0, errUnfinishedFileContract - } - triggerID, err := getBlockAtHeight(tx, triggerHeight) - if err != nil { - return 0, err - } - - // Get the index by appending the file contract ID to the trigger block and - // taking the hash, then converting the hash to a numerical value and - // modding it against the number of segments in the file. The result is a - // random number in range [0, numSegments]. The probability is very - // slightly weighted towards the beginning of the file, but because the - // size difference between the number of segments and the random number - // being modded, the difference is too small to make any practical - // difference. 
- h := types.NewHasher() - triggerID.EncodeTo(h.E) - fcid.EncodeTo(h.E) - seed := h.Sum() - numSegments := int64(modules.CalculateLeaves(fc.Filesize)) - seedInt := new(big.Int).SetBytes(seed[:]) - index := seedInt.Mod(seedInt, big.NewInt(numSegments)).Uint64() - return index, nil -} - -// validStorageProofs checks that the storage proofs are valid in the context -// of the consensus set. -func validStorageProofs(tx *sql.Tx, t types.Transaction) error { - for _, sp := range t.StorageProofs { - // Check that the storage proof itself is valid. - segmentIndex, err := storageProofSegment(tx, sp.ParentID) - if err != nil { - return err - } - - fc, exists, err := findFileContract(tx, sp.ParentID) - if err != nil { - return err - } - if !exists { - return errors.New("storage contract not found") - } - - leaves := modules.CalculateLeaves(fc.Filesize) - segmentLen := uint64(modules.SegmentSize) - - // If this segment chosen is the final segment, it should only be as - // long as necessary to complete the filesize. - height := blockHeight(tx) - if segmentIndex == leaves-1 && height >= 21e3 { - segmentLen = fc.Filesize % modules.SegmentSize - } - if segmentLen == 0 { - segmentLen = uint64(modules.SegmentSize) - } - - verified := modules.VerifySegment( - sp.Leaf[:segmentLen], - sp.Proof, - leaves, - segmentIndex, - fc.FileMerkleRoot, - ) - if !verified && fc.Filesize > 0 { - return errInvalidStorageProof - } - } - - return nil -} - -// validFileContractRevision checks that each file contract revision is valid -// in the context of the current consensus set. 
-func validFileContractRevisions(tx *sql.Tx, t types.Transaction) error { - for _, fcr := range t.FileContractRevisions { - fc, exists, err := findFileContract(tx, fcr.ParentID) - if err != nil { - return err - } - if !exists { - return errors.New("storage contract not found") - } - - // Check that the height is less than fc.WindowStart - revisions are - // not allowed to be submitted once the storage proof window has - // opened. This reduces complexity for unconfirmed transactions. - if blockHeight(tx) > fc.WindowStart { - return errLateRevision - } - - // Check that the revision number of the revision is greater than the - // revision number of the existing file contract. - if fc.RevisionNumber >= fcr.RevisionNumber { - return errLowRevisionNumber - } - - // Check that the unlock conditions match the unlock hash. - if fcr.UnlockConditions.UnlockHash() != types.Address(fc.UnlockHash) { - return errWrongUnlockConditions - } - - // Check that the payout of the revision matches the payout of the - // original, and that the payouts match each other. - var valid, missed types.Currency - for _, output := range fcr.ValidProofOutputs { - valid = valid.Add(output.Value) - } - for _, output := range fcr.MissedProofOutputs { - missed = missed.Add(output.Value) - } - var oldPayout types.Currency - for _, output := range fc.ValidProofOutputs { - oldPayout = oldPayout.Add(output.Value) - } - if !valid.Equals(oldPayout) { - return errAlteredRevisionPayouts - } - if !missed.Equals(oldPayout) { - return errAlteredRevisionPayouts - } - } - return nil -} - -// validSiafunds checks that the siafund portions of the transaction are valid -// in the context of the consensus set. -func validSiafunds(tx *sql.Tx, t types.Transaction) (err error) { - // Compare the number of input siafunds to the output siafunds. 
- var siafundInputSum uint64 - var siafundOutputSum uint64 - for _, sfi := range t.SiafundInputs { - sfo, _, exists, err := findSiafundOutput(tx, sfi.ParentID) - if err != nil { - return err - } - if !exists { - return errors.New("unable to find SF output") - } - - // Check the unlock conditions match the unlock hash. - if sfi.UnlockConditions.UnlockHash() != sfo.Address { - return errWrongUnlockConditions - } - - siafundInputSum = siafundInputSum + sfo.Value - } - for _, sfo := range t.SiafundOutputs { - siafundOutputSum = siafundOutputSum + sfo.Value - } - if siafundOutputSum != siafundInputSum { - return errSiafundInputOutputMismatch - } - return -} - -// validArbitraryData checks that the ArbitraryData portions of the transaction are -// valid in the context of the consensus set. Currently, only ArbitraryData with -// the types.SpecifierFoundation prefix is examined. -func validArbitraryData(tx *sql.Tx, t types.Transaction, currentHeight uint64) error { - if currentHeight < modules.FoundationHardforkHeight { - return nil - } - for _, arb := range t.ArbitraryData { - if bytes.HasPrefix(arb, types.SpecifierFoundation[:]) { - // NOTE: modules.StandaloneValid ensures that the update is correctly encoded. - if !foundationUpdateIsSigned(tx, t) { - return errUnsignedFoundationUpdate - } - } - } - return nil -} - -// foundationUpdateIsSigned checks that the transaction has a signature that -// covers a SiacoinInput controlled by the primary or failsafe UnlockHash. To -// minimize surface area, the signature must cover the whole transaction. -// -// This function does not actually validate the signature. By the time -// foundationUpdateIsSigned is called, all of the transaction's signatures have -// already been validated by StandaloneValid. 
-func foundationUpdateIsSigned(tx *sql.Tx, t types.Transaction) bool { - primary, failsafe, err := getFoundationUnlockHashes(tx) - if err != nil { - return false - } - for _, sci := range t.SiacoinInputs { - // NOTE: this conditional is split up to better visualize test coverage. - if uh := sci.UnlockConditions.UnlockHash(); uh != primary { - if uh != failsafe { - continue - } - } - // Locate the corresponding signature. - for _, sig := range t.Signatures { - if sig.ParentID == types.Hash256(sci.ParentID) && sig.CoveredFields.WholeTransaction { - return true - } - } - } - return false -} - -// validTransaction checks that all fields are valid within the current -// consensus state. If not an error is returned. -func validTransaction(tx *sql.Tx, t types.Transaction) error { - // StandaloneValid will check things like signatures and properties that - // should be inherent to the transaction. (storage proof rules, etc.) - currentHeight := blockHeight(tx) - if err := modules.StandaloneValid(t, currentHeight); err != nil { - return err - } - - // Check that each portion of the transaction is legal given the current - // consensus set. - if err := validSiacoins(tx, t); err != nil { - return err - } - if err := validStorageProofs(tx, t); err != nil { - return err - } - if err := validFileContractRevisions(tx, t); err != nil { - return err - } - if err := validSiafunds(tx, t); err != nil { - return err - } - if err := validArbitraryData(tx, t, currentHeight); err != nil { - return err - } - return nil -} - -// tryTransactionSet applies the input transactions to the consensus set to -// determine if they are valid. An error is returned IF they are not a valid -// set in the current consensus set. The size of the transactions and the set -// is not checked. After the transactions have been validated, a consensus -// change is returned detailing the diffs that the transactions set would have. 
-func (cs *ConsensusSet) tryTransactionSet(txns []types.Transaction) (modules.ConsensusChange, error) { - tx, err := cs.db.Begin() - if err != nil { - return modules.ConsensusChange{}, err - } - - // applyTransaction will apply the diffs from a transaction and store them - // in a block node. diffHolder is the blockNode that tracks the temporary - // changes. At the end of the function, all changes that were made to the - // consensus set get reverted. - diffHolder := new(processedBlock) - - // In the case of tryTransactionSet, we want to roll back the tx even if - // there is no error. So errSuccess is returned. - errSuccess := errors.New("success") - err = func(tx *sql.Tx) error { - diffHolder.Height = blockHeight(tx) - for _, txn := range txns { - if err := validTransaction(tx, txn); err != nil { - return err - } - applyTransaction(tx, diffHolder, txn) - } - return errSuccess - }(tx) - tx.Rollback() - if !modules.ContainsError(err, errSuccess) { - return modules.ConsensusChange{}, err - } - - cc := modules.ConsensusChange{ - ConsensusChangeDiffs: modules.ConsensusChangeDiffs{ - SiacoinOutputDiffs: diffHolder.SiacoinOutputDiffs, - FileContractDiffs: diffHolder.FileContractDiffs, - SiafundOutputDiffs: diffHolder.SiafundOutputDiffs, - DelayedSiacoinOutputDiffs: diffHolder.DelayedSiacoinOutputDiffs, - SiafundPoolDiffs: diffHolder.SiafundPoolDiffs, - }, - } - - return cc, nil -} - -// TryTransactionSet applies the input transactions to the consensus set to -// determine if they are valid. An error is returned IF they are not a valid -// set in the current consensus set. The size of the transactions and the set -// is not checked. After the transactions have been validated, a consensus -// change is returned detailing the diffs that the transactions set would have. 
-func (cs *ConsensusSet) TryTransactionSet(txns []types.Transaction) (modules.ConsensusChange, error) { - err := cs.tg.Add() - if err != nil { - return modules.ConsensusChange{}, err - } - defer cs.tg.Done() - cs.mu.RLock() - defer cs.mu.RUnlock() - return cs.tryTransactionSet(txns) -} - -// LockedTryTransactionSet calls fn while under read-lock, passing it a -// version of TryTransactionSet that can be called under read-lock. This fixes -// an edge case in the transaction pool. -func (cs *ConsensusSet) LockedTryTransactionSet(fn func(func(txns []types.Transaction) (modules.ConsensusChange, error)) error) error { - err := cs.tg.Add() - if err != nil { - return err - } - defer cs.tg.Done() - cs.mu.RLock() - defer cs.mu.RUnlock() - return fn(cs.tryTransactionSet) -} diff --git a/modules/consts.go b/modules/consts.go index e4078d0..50e8d35 100644 --- a/modules/consts.go +++ b/modules/consts.go @@ -1,14 +1,9 @@ package modules import ( - "math/big" - "time" - "go.sia.tech/core/types" ) -var numGenesisSiacoins types.Currency - // BytesPerTerabyte is how many bytes are there in one TiB. const BytesPerTerabyte = 1024 * 1024 * 1024 * 1024 @@ -25,451 +20,7 @@ const ( BlocksPerYear = 365 * BlocksPerDay ) -// SectorSize defines how large a sector should be in bytes. The sector -// size needs to be a power of two to be compatible with package -// merkletree. 4MiB has been chosen for the live network because large -// sectors significantly reduce the tracking overhead experienced by the -// renter and the host. -const SectorSize = uint64(1 << 22) - -// EstimatedFileContractTransactionSetSize is the estimated blockchain size -// of a transaction set between a renter and a host that contains a file -// contract. This transaction set will contain a setup transaction from each -// the host and the renter, and will also contain a file contract and file -// contract revision that have each been signed by all parties. 
-const EstimatedFileContractTransactionSetSize = 2048 - -const ( - // InitialCoinbase is the coinbase reward of the Genesis block. - InitialCoinbase = uint64(300e3) - - // The minimum coinbase is set to 30,000. Because the coinbase - // decreases by 1 every time, it means that Sia's coinbase will have an - // increasingly potent dropoff for about 5 years, until inflation more - // or less permanently settles around 2%. - MinimumCoinbase = 30e3 - - // SiafundCount is the total number of Siafunds. - SiafundCount = 10000 -) - -const ( - // AcceptResponse is the response given to an RPC call to indicate - // acceptance, i.e. that the sender wishes to continue communication. - AcceptResponse = "accept" - - // StopResponse is the response given to an RPC call to indicate graceful - // termination, i.e. that the sender wishes to cease communication, but - // not due to an error. - StopResponse = "stop" -) - var ( // BlockBytesPerMonthTerabyte is the conversion rate between block-bytes and month-TB. BlockBytesPerMonthTerabyte = types.NewCurrency64(BytesPerTerabyte).Mul64(BlocksPerMonth) ) - -const ( - // BlockSizeLimit is the maximum size of a binary-encoded Block - // that is permitted by the consensus rules. - BlockSizeLimit = uint64(2e6) - - // A block time of 1 block per 10 minutes is chosen to follow Bitcoin's - // example. The security lost by lowering the block time is not - // insignificant, and the convenience gained by lowering the blocktime - // even down to 90 seconds is not significant. 10 minutes could even be - // too short, but it has worked well for Bitcoin. - BlockFrequency = 600 - - // When the difficulty is adjusted, it is adjusted by looking at the - // timestamp of the 1000th previous block. This minimizes the abilities - // of miners to attack the network using rogue timestamps. - TargetWindow = 1e3 - - // ASICHardforkHeight is the height at which the hardfork targeting - // selected ASICs was activated. 
- ASICHardforkHeight = uint64(179000) - - // ASICHardforkFactor is the factor by which the hashrate of targeted - // ASICs will be reduced. - ASICHardforkFactor = uint64(1009) - - // A total time of 120,000 is chosen because that represents the total - // time elapsed at a perfect equilibrium, indicating a visible average - // block time that perfectly aligns with what is expected. - ASICHardforkTotalTime = 120e3 - - // DevAddrHardforkHeight is the height at which the DevAddr hardfork was - // activated. - DevAddrHardforkHeight = uint64(10000) - - // The oak difficulty adjustment hardfork is set to trigger at block - // 135,000, which is just under 6 months after the hardfork was first - // released as beta software to the network. This hopefully gives - // everyone plenty of time to upgrade and adopt the hardfork, while also - // being earlier than the most optimistic shipping dates for the miners - // that would otherwise be very disruptive to the network. - // - // There was a bug in the original Oak hardfork that had to be quickly - // followed up with another fix. The height of that fix is the - // OakHardforkFixBlock. - OakHardforkBlock = 135e3 - OakHardforkFixBlock = 139e3 - - // OakHardforkTxnSizeLimit is the maximum size allowed for a transaction, - // a change which was implemented simultaneously with the oak hardfork. - OakHardforkTxnSizeLimit = uint64(64e3) // 64 KB - - // The decay is kept at 995/1000, or a decay of about 0.5% each block. - // This puts the halflife of a block's relevance at about 1 day. This - // allows the difficulty to adjust rapidly if the hashrate is adjusting - // rapidly, while still keeping a relatively strong insulation against - // random variance. - OakDecayNum = 995 - OakDecayDenom = 1e3 - - // The block shift determines the most that the difficulty adjustment - // algorithm is allowed to shift the target block time. 
With a block - // frequency of 600 seconds, the min target block time is 200 seconds, - // and the max target block time is 1800 seconds. - OakMaxBlockShift = 3 - - // TaxHardforkHeight is the height at which the tax hardfork occurred. - TaxHardforkHeight = uint64(21000) - - // FoundationHardforkHeight is the height at which the Foundation subsidy - // hardfork was activated. - FoundationHardforkHeight = uint64(298000) - - // The Foundation subsidy hardfork activates at approximately 11pm EST - // on February 3, 2021. - // Subsidies are paid out approximately once per month. Since actual - // months vary in length, we instead divide the total number of blocks - // per year by 12. - FoundationSubsidyFrequency = BlocksPerYear / 12 - - // Blocks will not be accepted if their timestamp is more than 3 hours - // into the future, but will be accepted as soon as they are no longer - // 3 hours into the future. Blocks that are greater than 5 hours into - // the future are rejected outright, as it is assumed that by the time - // 2 hours have passed, those blocks will no longer be on the longest - // chain. Blocks cannot be kept forever because this opens a DoS vector. - FutureThreshold = 3 * 60 * 60 // 3 hours. - ExtremeFutureThreshold = 5 * 60 * 60 // 5 hours. - - // Payouts take 1 day to mature. This is to prevent a class of double - // spending attacks where parties unintentionally spend coins that will - // stop existing after a blockchain reorganization. There are multiple - // classes of payouts in Sia that depend on a previous block - if that - // block changes, then the output changes and the previously existing - // output ceases to exist. This delay stops both unintentional double - // spending and a small set of long-range mining attacks. - MaturityDelay = 144 - - // MedianTimestampWindow tells us how many blocks to look back when - // calculating the median timestamp over the previous n blocks. 
The - // timestamp of a block is not allowed to be less than or equal to the - // median timestamp of the previous n blocks, where for Sia this number - // is typically 11. - MedianTimestampWindow = uint64(11) - - // StorageProofHardforkHeight is the height at which the storage proof - // hardfork was activated. - StorageProofHardforkHeight = uint64(100000) -) - -var ( - GenesisSiacoinAllocation = []types.SiacoinOutput{} - - GenesisSiafundAllocation = []types.SiafundOutput{ - { - Value: 2, - Address: types.Address{4, 57, 229, 188, 127, 20, 204, 245, 211, 167, 232, 130, 208, 64, 146, 62, 69, 98, 81, 102, 221, 7, 123, 100, 70, 107, 199, 113, 121, 26, 198, 252}, - }, - { - Value: 6, - Address: types.Address{4, 158, 29, 42, 105, 119, 43, 5, 138, 72, 190, 190, 101, 114, 79, 243, 189, 248, 208, 151, 30, 187, 233, 148, 225, 233, 28, 159, 19, 232, 75, 244}, - }, - { - Value: 7, - Address: types.Address{8, 7, 66, 250, 25, 74, 247, 108, 162, 79, 220, 151, 202, 228, 241, 11, 130, 138, 13, 248, 193, 167, 136, 197, 65, 63, 234, 174, 205, 216, 71, 230}, - }, - { - Value: 8, - Address: types.Address{44, 106, 239, 51, 138, 102, 242, 19, 204, 197, 248, 178, 219, 122, 152, 251, 19, 20, 52, 32, 175, 32, 4, 156, 73, 33, 163, 165, 222, 184, 217, 218}, - }, - { - Value: 3, - Address: types.Address{44, 163, 31, 233, 74, 103, 55, 132, 230, 159, 97, 78, 149, 147, 65, 110, 164, 211, 105, 173, 158, 29, 202, 43, 85, 217, 85, 75, 83, 37, 205, 223}, - }, - { - Value: 1, - Address: types.Address{51, 151, 146, 84, 199, 7, 59, 89, 111, 172, 227, 200, 62, 55, 165, 253, 238, 186, 28, 145, 47, 137, 200, 15, 70, 199, 187, 125, 243, 104, 179, 240}, - }, - { - Value: 10, - Address: types.Address{53, 118, 253, 229, 254, 229, 28, 131, 233, 156, 108, 58, 197, 152, 17, 160, 74, 252, 11, 49, 112, 240, 66, 119, 40, 98, 114, 251, 5, 86, 233, 117}, - }, - { - Value: 50, - Address: types.Address{56, 219, 3, 50, 28, 3, 166, 95, 141, 163, 202, 35, 60, 199, 219, 10, 151, 176, 228, 97, 176, 133, 189, 33, 211, 202, 
83, 197, 31, 208, 254, 193}, - }, - { - Value: 75, - Address: types.Address{68, 190, 140, 87, 96, 232, 150, 32, 161, 177, 204, 65, 228, 223, 87, 217, 134, 90, 25, 56, 51, 45, 72, 107, 129, 12, 29, 202, 6, 7, 50, 13}, - }, - { - Value: 10, - Address: types.Address{69, 14, 201, 200, 90, 73, 245, 45, 154, 94, 161, 19, 199, 241, 203, 56, 13, 63, 5, 220, 121, 245, 247, 52, 194, 181, 252, 76, 130, 6, 114, 36}, - }, - { - Value: 10, - Address: types.Address{72, 128, 253, 207, 169, 48, 1, 26, 237, 205, 169, 102, 196, 224, 42, 186, 95, 151, 59, 226, 203, 136, 251, 223, 165, 38, 88, 110, 47, 213, 121, 224}, - }, - { - Value: 50, - Address: types.Address{72, 130, 164, 227, 218, 28, 60, 15, 56, 151, 212, 242, 77, 131, 232, 131, 42, 57, 132, 173, 113, 118, 66, 183, 38, 79, 96, 178, 105, 108, 26, 247}, - }, - { - Value: 10, - Address: types.Address{74, 210, 58, 228, 111, 69, 253, 120, 53, 195, 110, 26, 115, 76, 211, 202, 199, 159, 204, 14, 78, 92, 14, 131, 250, 22, 141, 236, 154, 44, 39, 135}, - }, - { - Value: 15, - Address: types.Address{85, 198, 154, 41, 196, 116, 226, 114, 202, 94, 214, 147, 87, 84, 247, 164, 195, 79, 58, 123, 26, 33, 68, 65, 116, 79, 181, 241, 241, 208, 215, 184}, - }, - { - Value: 121, - Address: types.Address{87, 239, 83, 125, 152, 14, 19, 22, 203, 136, 46, 192, 203, 87, 224, 190, 77, 236, 125, 18, 142, 223, 146, 70, 16, 23, 252, 19, 100, 69, 91, 111}, - }, - { - Value: 222, - Address: types.Address{91, 201, 101, 11, 188, 40, 35, 111, 236, 133, 31, 124, 97, 246, 140, 136, 143, 245, 152, 174, 111, 245, 188, 124, 21, 125, 187, 192, 203, 92, 253, 57}, - }, - { - Value: 10, - Address: types.Address{110, 240, 238, 173, 78, 138, 185, 138, 179, 227, 135, 153, 54, 132, 46, 62, 226, 206, 204, 35, 174, 107, 156, 15, 142, 2, 93, 132, 163, 60, 50, 89}, - }, - { - Value: 3, - Address: types.Address{114, 58, 147, 44, 64, 69, 72, 184, 65, 178, 213, 94, 157, 44, 88, 106, 92, 31, 145, 193, 215, 200, 215, 233, 99, 116, 36, 197, 160, 70, 79, 153}, - }, - { - Value: 1, - 
Address: types.Address{123, 106, 229, 101, 220, 252, 50, 203, 38, 183, 133, 152, 250, 167, 210, 155, 252, 102, 150, 29, 187, 3, 178, 53, 11, 145, 143, 33, 166, 115, 250, 40}, - }, - { - Value: 5, - Address: types.Address{124, 101, 207, 175, 50, 119, 207, 26, 62, 15, 247, 141, 150, 174, 73, 247, 238, 28, 77, 255, 222, 104, 166, 244, 112, 86, 227, 80, 215, 45, 69, 143}, - }, - { - Value: 10, - Address: types.Address{130, 184, 72, 15, 227, 79, 217, 205, 120, 254, 67, 69, 10, 49, 76, 194, 222, 30, 242, 62, 88, 179, 51, 117, 27, 166, 140, 6, 7, 22, 222, 185}, - }, - { - Value: 25, - Address: types.Address{134, 137, 198, 172, 96, 54, 45, 10, 100, 128, 91, 225, 226, 134, 143, 108, 31, 70, 187, 228, 54, 212, 70, 229, 149, 57, 64, 166, 153, 123, 238, 180}, - }, - { - Value: 1, - Address: types.Address{143, 253, 118, 229, 109, 181, 141, 224, 91, 144, 123, 160, 203, 221, 119, 104, 172, 13, 105, 77, 171, 185, 122, 54, 229, 168, 6, 130, 160, 130, 182, 151}, - }, - { - Value: 8, - Address: types.Address{147, 108, 249, 16, 36, 249, 108, 184, 196, 212, 241, 120, 219, 63, 45, 184, 86, 53, 96, 207, 130, 96, 210, 251, 136, 9, 193, 160, 131, 198, 221, 185}, - }, - { - Value: 58, - Address: types.Address{155, 79, 89, 28, 69, 71, 239, 198, 246, 2, 198, 254, 92, 59, 192, 205, 229, 152, 36, 186, 110, 122, 233, 221, 76, 143, 3, 238, 89, 231, 192, 23}, - }, - { - Value: 2, - Address: types.Address{156, 32, 76, 105, 213, 46, 66, 50, 27, 85, 56, 9, 106, 193, 80, 145, 19, 101, 84, 177, 145, 4, 125, 28, 79, 252, 43, 83, 118, 110, 206, 247}, - }, - { - Value: 23, - Address: types.Address{157, 169, 134, 24, 254, 22, 58, 188, 119, 87, 201, 238, 55, 168, 194, 131, 88, 18, 39, 168, 37, 2, 198, 194, 93, 202, 116, 146, 189, 17, 108, 44}, - }, - { - Value: 10, - Address: types.Address{158, 51, 104, 36, 242, 114, 67, 16, 168, 230, 4, 111, 241, 72, 5, 14, 182, 102, 169, 156, 144, 220, 103, 117, 223, 8, 58, 187, 124, 102, 80, 44}, - }, - { - Value: 1, - Address: types.Address{160, 175, 59, 33, 223, 30, 
82, 60, 34, 110, 28, 203, 249, 93, 3, 16, 218, 12, 250, 206, 138, 231, 85, 67, 69, 191, 68, 198, 160, 87, 154, 68}, - }, - { - Value: 75, - Address: types.Address{163, 94, 51, 220, 14, 144, 83, 112, 62, 10, 0, 173, 161, 234, 211, 176, 186, 84, 9, 189, 250, 111, 33, 231, 114, 87, 100, 75, 72, 217, 11, 26}, - }, - { - Value: 3, - Address: types.Address{170, 7, 138, 116, 205, 20, 132, 197, 166, 251, 75, 93, 69, 6, 109, 244, 212, 119, 173, 114, 34, 18, 25, 21, 111, 203, 203, 253, 138, 104, 27, 36}, - }, - { - Value: 90, - Address: types.Address{173, 120, 128, 104, 186, 86, 151, 140, 191, 23, 231, 193, 77, 245, 243, 104, 196, 55, 155, 243, 111, 15, 84, 139, 148, 187, 173, 47, 104, 69, 141, 39}, - }, - { - Value: 20, - Address: types.Address{179, 185, 228, 166, 139, 94, 13, 193, 255, 227, 174, 99, 120, 105, 109, 221, 247, 4, 155, 243, 229, 37, 26, 98, 222, 12, 91, 80, 223, 33, 61, 56}, - }, - { - Value: 5, - Address: types.Address{193, 49, 103, 20, 170, 135, 182, 85, 149, 18, 159, 194, 152, 120, 162, 208, 49, 158, 220, 188, 114, 79, 1, 131, 62, 27, 86, 57, 244, 46, 64, 66}, - }, - { - Value: 1, - Address: types.Address{196, 71, 45, 222, 0, 21, 12, 121, 197, 224, 101, 65, 40, 57, 19, 119, 112, 205, 166, 23, 2, 91, 75, 231, 69, 143, 221, 68, 245, 75, 7, 52}, - }, - { - Value: 44, - Address: types.Address{196, 214, 236, 211, 227, 216, 152, 127, 164, 2, 235, 14, 235, 46, 142, 231, 83, 38, 7, 131, 208, 29, 179, 189, 62, 88, 129, 180, 119, 158, 214, 97}, - }, - { - Value: 23, - Address: types.Address{206, 58, 114, 148, 131, 49, 87, 197, 86, 18, 216, 26, 62, 79, 152, 175, 33, 4, 132, 160, 108, 231, 53, 200, 48, 76, 125, 94, 156, 85, 32, 130}, - }, - { - Value: 80, - Address: types.Address{200, 103, 135, 126, 197, 2, 203, 63, 241, 6, 245, 195, 220, 102, 27, 74, 232, 249, 201, 86, 207, 34, 51, 26, 180, 151, 136, 108, 112, 56, 132, 72}, - }, - { - Value: 2, - Address: types.Address{200, 249, 245, 218, 58, 253, 76, 250, 88, 114, 70, 239, 14, 2, 250, 123, 10, 192, 198, 61, 187, 155, 
247, 152, 165, 174, 198, 24, 142, 39, 177, 119}, - }, - { - Value: 1, - Address: types.Address{209, 1, 199, 184, 186, 57, 21, 137, 33, 252, 219, 184, 130, 38, 32, 98, 63, 252, 250, 79, 70, 146, 169, 78, 180, 161, 29, 93, 38, 45, 175, 176}, - }, - { - Value: 2, - Address: types.Address{212, 107, 233, 43, 185, 138, 79, 253, 12, 237, 214, 17, 219, 198, 151, 92, 81, 129, 17, 120, 139, 58, 66, 119, 126, 220, 132, 136, 3, 108, 57, 58}, - }, - { - Value: 3, - Address: types.Address{214, 244, 146, 173, 173, 80, 33, 185, 29, 133, 77, 167, 185, 1, 38, 23, 111, 179, 104, 150, 105, 162, 120, 26, 245, 63, 114, 119, 52, 1, 44, 222}, - }, - { - Value: 1, - Address: types.Address{217, 218, 172, 16, 53, 134, 160, 226, 44, 138, 93, 53, 181, 62, 4, 209, 190, 27, 0, 93, 105, 17, 169, 61, 98, 145, 131, 112, 121, 55, 97, 184}, - }, - { - Value: 1, - Address: types.Address{223, 162, 172, 55, 54, 193, 37, 142, 200, 213, 230, 48, 186, 145, 184, 206, 15, 225, 167, 19, 37, 70, 38, 48, 135, 87, 205, 81, 187, 237, 181, 180}, - }, - { - Value: 1, - Address: types.Address{241, 46, 139, 41, 40, 63, 47, 169, 131, 173, 124, 246, 228, 213, 102, 44, 100, 217, 62, 237, 133, 154, 248, 69, 228, 2, 36, 206, 47, 250, 249, 170}, - }, - { - Value: 50, - Address: types.Address{241, 50, 229, 211, 66, 32, 115, 241, 117, 87, 180, 239, 76, 246, 14, 129, 105, 181, 153, 105, 105, 203, 229, 237, 23, 130, 193, 170, 100, 201, 38, 71}, - }, - { - Value: 8841, - Address: types.Address{125, 12, 68, 247, 102, 78, 45, 52, 229, 62, 253, 224, 102, 26, 111, 98, 142, 201, 38, 71, 133, 174, 142, 60, 215, 201, 115, 232, 209, 144, 195, 201}, - }, - } - - // The genesis timestamp is set to June 6th, because that is when the - // 100-block developer premine started. The trailing zeroes are a - // bonus, and make the timestamp easier to memorize. - GenesisTimestamp = time.Unix(1433600000, 0) // June 6th, 2015 @ 2:13pm UTC. - - // GenesisBlock is the first block of the block chain. 
- GenesisBlock = types.Block{ - Timestamp: GenesisTimestamp, - Transactions: []types.Transaction{ - { - SiacoinOutputs: GenesisSiacoinAllocation, - SiafundOutputs: GenesisSiafundAllocation, - }, - }, - } - - // GenesisID is used in many places. Calculating it once saves lots of - // redundant computation. - GenesisID = GenesisBlock.ID() - - // RootDepth is the cumulative target of all blocks. The root depth is - // essentially the maximum possible target, there have been no blocks yet, - // so there is no cumulated difficulty yet. - RootDepth = Target{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} - - // The RootTarget was set such that the developers could reasonable - // premine 100 blocks in a day. It was known to the developers at launch - // this this was at least one and perhaps two orders of magnitude too - // small. - RootTarget = Target{0, 0, 0, 0, 32} - - // The difficulty adjustment is clamped to 2.5x every 500 blocks. This - // corresponds to 6.25x every 2 weeks, which can be compared to - // Bitcoin's clamp of 4x every 2 weeks. The difficulty clamp is - // primarily to stop difficulty raising attacks. Sia's safety margin is - // similar to Bitcoin's despite the looser clamp because Sia's - // difficulty is adjusted four times as often. This does result in - // greater difficulty oscillation, a tradeoff that was chosen to be - // acceptable due to Sia's more vulnerable position as an altcoin. - MaxTargetAdjustmentUp = big.NewRat(25, 10) - MaxTargetAdjustmentDown = big.NewRat(10, 25) - - // A total target of 67 leading zeroes is chosen because that aligns - // with the amount of hashrate that we expect to be on the network - // after the hardfork. 
- ASICHardforkTotalTarget = Target{0, 0, 0, 0, 0, 0, 0, 0, 32} - - // The max rise and max drop for the difficulty is kept at 0.4% per - // block, which means that in 1008 blocks the difficulty can move a - // maximum of about 55x. This is significant, and means that dramatic - // hashrate changes can be responded to quickly, while still forcing an - // attacker to do a significant amount of work in order to execute a - // difficulty raising attack, and minimizing the chance that an attacker - // can get lucky and fake a ton of work. - OakMaxRise = big.NewRat(1004, 1e3) - OakMaxDrop = big.NewRat(1e3, 1004) - - // InitialFoundationSubsidy is the one-time subsidy sent to the Foundation - // address upon activation of the hardfork, representing one year's worth of - // block subsidies. - InitialFoundationSubsidy = types.HastingsPerSiacoin.Mul64(30e3).Mul64(BlocksPerYear) - - // FoundationSubsidyPerBlock is the amount allocated to the Foundation - // subsidy per block. - FoundationSubsidyPerBlock = types.HastingsPerSiacoin.Mul64(30e3) - - // ASICHardforkReplayProtectionPrefix is a byte that prefixes - // SiacoinInputs and SiafundInputs when calculating SigHashes to protect - // against replay attacks. - ASICHardforkReplayProtectionPrefix = []byte{0} - - // FoundationHardforkReplayProtectionPrefix is a byte that prefixes - // SiacoinInputs and SiafundInputs when calculating SigHashes to protect - // against replay attacks. - FoundationHardforkReplayProtectionPrefix = []byte{1} - - // InitialFoundationUnlockHash is the primary Foundation subsidy address. It - // receives the initial Foundation subsidy. The keys that this address was - // derived from can also be used to set a new primary and failsafe address. - InitialFoundationUnlockHash types.Address - - // InitialFoundationFailsafeUnlockHash is the "backup" Foundation address. - // It does not receive the Foundation subsidy, but its keys can be used to - // set a new primary and failsafe address. 
These UnlockConditions should - // also be subject to a timelock that prevents the failsafe from being used - // immediately. - InitialFoundationFailsafeUnlockHash types.Address -) - -func init() { - // Calculate the initial coinbase. - for _, tx := range GenesisBlock.Transactions { - for _, sco := range tx.SiacoinOutputs { - numGenesisSiacoins = numGenesisSiacoins.Add(sco.Value) - } - } - - // Parse the Foundation unlock hashes. - InitialFoundationUnlockHash, _ = types.ParseAddress("053b2def3cbdd078c19d62ce2b4f0b1a3c5e0ffbeeff01280efb1f8969b2f5bb4fdc680f0807") - InitialFoundationFailsafeUnlockHash, _ = types.ParseAddress("27c22a6c6e6645802a3b8fa0e5374657438ef12716d2205d3e866272de1b644dbabd53d6d560") -} diff --git a/modules/encoding.go b/modules/encoding.go deleted file mode 100644 index 654a0fa..0000000 --- a/modules/encoding.go +++ /dev/null @@ -1,69 +0,0 @@ -package modules - -import ( - "go.sia.tech/core/types" -) - -// WriteUint64 writes an uint64 to the underlying stream. -func WriteUint64(i uint64, e *types.Encoder) { - e.WriteUint64(8) - e.WriteUint64(i) - e.Flush() -} - -// WriteBool writes a bool to the underlying stream. -func WriteBool(b bool, e *types.Encoder) { - e.WriteUint64(1) - e.WriteBool(b) - e.Flush() -} - -// WriteString writes a string to the underlying stream. -func WriteString(s string, e *types.Encoder) { - e.WritePrefix(len(s) + 8) - e.WriteString(s) - e.Flush() -} - -// WriteBytes writes a byte slice to the underlying stream. -func WriteBytes(b []byte, e *types.Encoder) { - e.WritePrefix(len(b) + 8) - e.WriteBytes(b) - e.Flush() -} - -// ReadUint64 reads an uint64 from the underlying stream. -func ReadUint64(d *types.Decoder) uint64 { - _ = d.ReadUint64() - if err := d.Err(); err != nil { - return 0 - } - return d.ReadUint64() -} - -// ReadBool reads a bool from the underlying stream. 
-func ReadBool(d *types.Decoder) bool { - _ = d.ReadUint64() - if err := d.Err(); err != nil { - return false - } - return d.ReadBool() -} - -// ReadString reads a string from the underlying stream. -func ReadString(d *types.Decoder) string { - _ = d.ReadUint64() - if err := d.Err(); err != nil { - return "" - } - return d.ReadString() -} - -// ReadBytes reads a byte slice from the underlying stream. -func ReadBytes(d *types.Decoder) []byte { - _ = d.ReadUint64() - if err := d.Err(); err != nil { - return make([]byte, 0) - } - return d.ReadBytes() -} diff --git a/modules/encrypt.go b/modules/encrypt.go deleted file mode 100644 index 0cf17d7..0000000 --- a/modules/encrypt.go +++ /dev/null @@ -1,46 +0,0 @@ -package modules - -import ( - "crypto/cipher" - "errors" - - "golang.org/x/crypto/twofish" - - "lukechampine.com/frand" -) - -// ErrInsufficientLen is an error when supplied ciphertext is not -// long enough to contain a nonce. -var ErrInsufficientLen = errors.New("supplied ciphertext is not long enough to contain a nonce") - -// createCipher returns an initialized TwoFish cipher. -func createCipher(key WalletKey) (cipher.AEAD, error) { - c, err := twofish.NewCipher(key[:]) - if err != nil { - return nil, errors.New("NewCipher only returns an error if len(key) != 16, 24, or 32.") - } - return cipher.NewGCM(c) -} - -// Encrypt encrypts the plaintext with the provided key. -func Encrypt(key WalletKey, plaintext []byte) ([]byte, error) { - aead, err := createCipher(key) - if err != nil { - return nil, errors.New("NewGCM only returns an error if twofishCipher.BlockSize != 16") - } - nonce := frand.Bytes(aead.NonceSize()) - return aead.Seal(nonce, nonce, plaintext, nil), nil -} - -// Decrypt decrypts the ciphertext using the provided key. 
-func Decrypt(key WalletKey, ciphertext []byte) ([]byte, error) { - aead, err := createCipher(key) - if err != nil { - return nil, errors.New("NewGCM only returns an error if twofishCipher.BlockSize != 16") - } - if len(ciphertext) < aead.NonceSize() { - return nil, ErrInsufficientLen - } - nonce, ciphertext := ciphertext[:aead.NonceSize()], ciphertext[aead.NonceSize():] - return aead.Open(nil, nonce, ciphertext, nil) -} diff --git a/modules/gateway.go b/modules/gateway.go deleted file mode 100644 index 827031b..0000000 --- a/modules/gateway.go +++ /dev/null @@ -1,208 +0,0 @@ -package modules - -import ( - "net" - - "go.sia.tech/core/types" -) - -var ( - // BootstrapPeers is a list of peers that can be used to find other peers - - // when a client first connects to the network, the only options for - // finding peers are either manual entry of peers or to use a hardcoded - // bootstrap point. While the bootstrap point could be a central service, - // it can also be a list of peers that are known to be stable. We have - // chosen to hardcode known-stable peers. - // - // These peers have been verified to be v1.5.4 or higher. 
- BootstrapPeers = []NetAddress{ - "82.65.206.23:9981", - "135.181.208.120:9981", - "176.106.59.120:9981", - "144.91.127.102:9981", - "109.195.83.186:9981", - "96.60.27.112:5151", - "144.217.7.188:9981", - "5.9.19.246:9981", - "94.110.4.134:9981", - "208.88.8.166:5151", - "139.162.81.190:9981", - "5.19.177.22:9981", - "13.113.190.118:9981", - "134.209.89.131:9981", - "176.104.8.173:9981", - "81.25.225.178:9981", - "147.135.23.200:9981", - "76.216.70.162:9981", - "72.8.228.138:9981", - "164.138.31.105:9981", - "78.69.28.182:9861", - "176.215.255.127:9981", - "72.69.188.134:9981", - "71.237.62.44:9981", - "95.211.140.149:9981", - "5.196.66.75:9981", - "2.26.62.61:9981", - "88.98.208.124:9981", - "50.116.14.37:9981", - "173.235.144.230:9981", - "73.74.104.175:10031", - "199.195.252.152:9981", - "135.181.142.113:9981", - "148.251.152.35:9981", - "148.251.125.117:9981", - "136.52.87.46:9981", - "82.217.213.145:9981", - "81.7.16.159:9681", - "146.52.104.39:9981", - "78.197.237.216:9981", - "162.196.91.121:9981", - "148.251.82.174:9981", - "65.21.79.100:9981", - "103.76.41.143:9981", - "81.98.132.81:9981", - "109.236.92.161:9981", - "167.86.109.162:9981", - "139.162.187.240:9991", - "92.83.254.237:9791", - "80.101.32.17:9981", - "81.196.138.172:11152", - "50.54.136.187:9981", - "5.39.76.82:9981", - "92.90.91.29:9981", - "116.202.87.160:9981", - "13.212.22.103:9981", - "5.141.81.96:9981", - "66.176.169.2:9981", - "92.58.23.92:9981", - "149.248.110.111:9981", - "142.182.59.54:9981", - "190.111.196.118:9981", - "54.38.120.222:9981", - "109.195.166.133:9981", - "172.104.15.208:9981", - "65.30.133.139:9981", - "95.217.180.130:9981", - "82.223.202.234:9981", - "82.64.236.171:9981", - "188.122.0.242:9991", - "121.41.105.53:9981", - "161.97.176.97:9981", - "71.178.248.177:9981", - "89.69.17.157:9981", - "63.141.234.114:9981", - } -) - -type ( - // Peer contains all the info necessary to Broadcast to a peer. 
- Peer struct { - Inbound bool `json:"inbound"` - Local bool `json:"local"` - NetAddress NetAddress `json:"netaddress"` - Version string `json:"version"` - } - - // A PeerConn is the connection type used when communicating with peers during - // an RPC. It is identical to a net.Conn with the additional RPCAddr method. - // This method acts as an identifier for peers and is the address that the - // peer can be dialed on. It is also the address that should be used when - // calling an RPC on the peer. - PeerConn interface { - net.Conn - RPCAddr() NetAddress - } - - // RPCFunc is the type signature of functions that handle RPCs. It is used for - // both the caller and the callee. RPCFuncs may perform locking. RPCFuncs may - // close the connection early, and it is recommended that they do so to avoid - // keeping the connection open after all necessary I/O has been performed. - RPCFunc func(PeerConn) error - - // A Gateway facilitates the interactions between the local node and remote - // nodes (peers). It relays incoming blocks and transactions to local modules, - // and broadcasts outgoing blocks and transactions to peers. In a broad sense, - // it is responsible for ensuring that the local consensus set is consistent - // with the "network" consensus set. - Gateway interface { - Alerter - - // Connect establishes a persistent connection to a peer. - Connect(NetAddress) error - - // ConnectManual is a Connect wrapper for a user-initiated Connect. - ConnectManual(NetAddress) error - - // Disconnect terminates a connection to a peer. - Disconnect(NetAddress) error - - // DiscoverAddress discovers and returns the current public IP address - // of the gateway. Contrary to Address, DiscoverAddress is blocking and - // might take multiple minutes to return. A channel to cancel the - // discovery can be supplied optionally. - DiscoverAddress(cancel <-chan struct{}) (net.IP, error) - - // ForwardPort adds a port mapping to the router. 
It will block until - // the mapping is established or until it is interrupted by a shutdown. - ForwardPort(port string) error - - // DisconnectManual is a Disconnect wrapper for a user-initiated - // disconnect. - DisconnectManual(NetAddress) error - - // AddToBlocklist adds addresses to the blocklist of the gateway. - AddToBlocklist(addresses []string) error - - // Blocklist returns the current blocklist of the Gateway. - Blocklist() ([]string, error) - - // RemoveFromBlocklist removes addresses from the blocklist of the - // gateway. - RemoveFromBlocklist(addresses []string) error - - // SetBlocklist sets the blocklist of the gateway. - SetBlocklist(addresses []string) error - - // Address returns the Gateway's address. - Address() NetAddress - - // Peers returns the addresses that the Gateway is currently connected - // to. - Peers() []Peer - - // RegisterRPC registers a function to handle incoming connections that - // supply the given RPC ID. - RegisterRPC(string, RPCFunc) - - // UnregisterRPC unregisters an RPC and removes all references to the - // RPCFunc supplied in the corresponding RegisterRPC call. References to - // RPCFuncs registered with RegisterConnectCall are not removed and - // should be removed with UnregisterConnectCall. If the RPC does not - // exist no action is taken. - UnregisterRPC(string) - - // RegisterConnectCall registers an RPC name and function to be called - // upon connecting to a peer. - RegisterConnectCall(string, RPCFunc) - - // UnregisterConnectCall unregisters an RPC and removes all references to the - // RPCFunc supplied in the corresponding RegisterConnectCall call. References - // to RPCFuncs registered with RegisterRPC are not removed and should be - // removed with UnregisterRPC. If the RPC does not exist no action is taken. - UnregisterConnectCall(string) - - // RPC calls an RPC on the given address. RPC cannot be called on an - // address that the Gateway is not connected to. 
- RPC(NetAddress, string, RPCFunc) error - - // Broadcast transmits obj, prefaced by the RPC name, to all of the - // given peers in parallel. - Broadcast(name string, obj types.EncoderTo, peers []Peer) - - // Online returns true if the gateway is connected to remote hosts. - Online() bool - - // Close safely stops the Gateway's listener process. - Close() error - } -) diff --git a/modules/gateway/alert.go b/modules/gateway/alert.go deleted file mode 100644 index 504fbad..0000000 --- a/modules/gateway/alert.go +++ /dev/null @@ -1,8 +0,0 @@ -package gateway - -import "github.com/mike76-dev/sia-satellite/modules" - -// Alerts implements the modules.Alerter interface for the gateway. -func (g *Gateway) Alerts() (crit, err, warn, info []modules.Alert) { - return g.staticAlerter.Alerts() -} diff --git a/modules/gateway/conn.go b/modules/gateway/conn.go deleted file mode 100644 index 652c3bb..0000000 --- a/modules/gateway/conn.go +++ /dev/null @@ -1,47 +0,0 @@ -package gateway - -import ( - "net" - "time" - - "github.com/mike76-dev/sia-satellite/modules" -) - -// peerConn is a simple type that implements the modules.PeerConn interface. -type peerConn struct { - net.Conn - dialbackAddr modules.NetAddress -} - -// RPCAddr implements the RPCAddr method of the modules.PeerConn interface. It -// is the address that identifies a peer. -func (pc peerConn) RPCAddr() modules.NetAddress { - return pc.dialbackAddr -} - -// newLocalAddr creates a TCPAddr to be used as the LocalAddr for the Dialer. -func newLocalAddr(addr modules.NetAddress) *net.TCPAddr { - ip := net.ParseIP(addr.Host()) - return &net.TCPAddr{ - IP: ip, - Zone: "", - } -} - -// staticDial will staticDial the input address and return a connection. -// staticDial appropriately handles things like clean shutdown, fast shutdown, -// and chooses the correct communication protocol. 
-func (g *Gateway) staticDial(addr modules.NetAddress) (net.Conn, error) { - dialer := &net.Dialer{ - Cancel: g.threads.StopChan(), - Timeout: dialTimeout, - } - - conn, err := dialer.Dial("tcp", string(addr)) - if err != nil { - return nil, err - } - conn.SetDeadline(time.Now().Add(connStdDeadline)) - - return conn, nil -} diff --git a/modules/gateway/consts.go b/modules/gateway/consts.go deleted file mode 100644 index 87d96d1..0000000 --- a/modules/gateway/consts.go +++ /dev/null @@ -1,166 +0,0 @@ -package gateway - -import ( - "time" - - "github.com/mike76-dev/sia-satellite/modules" -) - -// Constants related to the gateway's alerts. -const ( - // AlertMSGGatewayOffline indicates that the last time the gateway checked - // the network status it was offline. - AlertMSGGatewayOffline = "not connected to the internet" -) - -const ( - // handshakeUpgradeVersion is the version where the gateway handshake RPC - // was altered to include additional information transfer. - handshakeUpgradeVersion = "1.0.0" - - // maxEncodedSessionHeaderSize is the maximum allowed size of an encoded - // sessionHeader object. - maxEncodedSessionHeaderSize = 40 + modules.MaxEncodedNetAddressLength - - // maxLocalOutbound is currently set to 3, meaning the gateway will not - // consider a local node to be an outbound peer if the gateway already has - // 3 outbound peers. Three is currently needed to handle situations where - // the gateway is at high risk of connecting to itself (such as a low - // number of total peers, especially such as in a testing environment). - // Once the gateway has a proper way to figure out that it's trying to - // connect to itself, this number can be reduced. - maxLocalOutboundPeers = 3 - - // saveFrequency defines how often the gateway saves its persistence. - saveFrequency = time.Minute * 2 - - // minimumAcceptablePeerVersion is the oldest version for which we accept - // incoming connections. 
This version is usually raised if changes to the - // codebase were made that weren't backwards compatible. This might include - // changes to the protocol or hardforks. - minimumAcceptablePeerVersion = "1.5.4" -) - -const ( - // fastNodePurgeDelay defines the amount of time that is waited between each - // iteration of the purge loop when the gateway has enough nodes to be - // needing to purge quickly. - fastNodePurgeDelay = 1 * time.Minute - - // healthyNodeListLen defines the number of nodes that the gateway must - // have in the node list before it will stop asking peers for more nodes. - healthyNodeListLen = 200 - - // maxSharedNodes defines the number of nodes that will be shared between - // peers when they are expanding their node lists. - maxSharedNodes = uint64(10) - - // nodeListDelay defines the amount of time that is waited between each - // iteration of the node list loop. - nodeListDelay = 5 * time.Second - - // nodePurgeDelay defines the amount of time that is waited between each - // iteration of the node purge loop. - nodePurgeDelay = 10 * time.Minute - - // onlineCheckFrequency defines how often the gateway calls 'Online' in - // threadedOnlineCheck. - onlineCheckFrequency = 30 * time.Second - - // peerRPCDelay defines the amount of time waited between each RPC accepted - // from a peer. Without this delay, a peer can force us to spin up thousands - // of goroutines per second. - peerRPCDelay = 3 * time.Second - - // pruneNodeListLen defines the number of nodes that the gateway must have - // to be pruning nodes from the node list. - pruneNodeListLen = 50 - - // quickPruneListLen defines the number of nodes that the gateway must have - // to be pruning nodes quickly from the node list. - quickPruneListLen = 250 -) - -const ( - // The gateway will sleep this long between incoming connections. For - // attack reasons, the acceptInterval should be longer than the - // nodeListDelay. 
Right at startup, a node is vulnerable to being flooded - // by Sybil attackers. The node's best defense is to wait until it has - // filled out its nodelist somewhat from the bootstrap nodes. An attacker - // needs to completely dominate the nodelist and the peerlist to be - // successful, so just a few honest nodes from requests to the bootstraps - // should be enough to fend from most attacks. - acceptInterval = 6 * time.Second - - // acquiringPeersDelay defines the amount of time that is waited between - // iterations of the peer acquisition loop if the gateway is actively - // forming new connections with peers. - acquiringPeersDelay = 5 * time.Second - - // fullyConnectedThreshold defines the number of peers that the gateway can - // have before it stops accepting inbound connections. - fullyConnectedThreshold = 128 - - // maxConcurrentOutboundPeerRequests defines the maximum number of peer - // connections that the gateway will try to form concurrently. - maxConcurrentOutboundPeerRequests = 3 - - // noNodesDelay defines the amount of time that is waited between - // iterations of the peer acquisition loop if the gateway does not have any - // nodes in the nodelist. - noNodesDelay = 20 * time.Second - - // unwawntedLocalPeerDelay defines the amount of time that is waited - // between iterations of the permanentPeerManager if the gateway has at - // least a few outbound peers, but is not well connected, and the recently - // selected peer was a local peer. The wait is mostly to prevent the - // gateway from hogging the CPU in the event that all peers are local - // peers. - unwantedLocalPeerDelay = 2 * time.Second - - // wellConnectedDelay defines the amount of time that is waited between - // iterations of the peer acquisition loop if the gateway is well - // connected. - wellConnectedDelay = 5 * time.Minute - - // wellConnectedThreshold is the number of outbound connections at which - // the gateway will not attempt to make new outbound connections. 
- wellConnectedThreshold = 8 -) - -const ( - // connStdDeadline defines the standard deadline that should be used for - // all temporary connections to the gateway. - connStdDeadline = 5 * time.Minute - - // the gateway will abort a connection attempt after this long. - dialTimeout = 3 * time.Minute - - // rpcStdDeadline defines the standard deadline that should be used for all - // incoming RPC calls. - rpcStdDeadline = 5 * time.Minute -) - -const ( - // minPeersForIPDiscovery is the minimum number of peer connections we wait - // for before we try to discover our public ip from them. It is also the - // minimum number of successful replies we expect from our peers before we - // accept a result. - minPeersForIPDiscovery = 5 - - // timeoutIPDiscovery is the time after which managedIPFromPeers will fail - // if the ip couldn't be discovered successfully. - timeoutIPDiscovery = 5 * time.Minute - - // rediscoverIPIntervalSuccess is the time that has to pass after a - // successful IP discovery before we rediscover the IP. - rediscoverIPIntervalSuccess = 3 * time.Hour - - // rediscoverIPIntervalFailure is the time that has to pass after a failed - // IP discovery before we try again. - rediscoverIPIntervalFailure = 15 * time.Minute - - // peerDiscoveryRetryInterval is the time we wait when there were not - // enough peers to determine our public ip address before trying again. 
- peerDiscoveryRetryInterval = 10 * time.Second -) diff --git a/modules/gateway/gateway.go b/modules/gateway/gateway.go deleted file mode 100644 index 19f74de..0000000 --- a/modules/gateway/gateway.go +++ /dev/null @@ -1,388 +0,0 @@ -package gateway - -import ( - "database/sql" - "errors" - "fmt" - "net" - "path/filepath" - "sync" - "time" - - siasync "github.com/mike76-dev/sia-satellite/internal/sync" - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" - - "lukechampine.com/frand" -) - -// ProtocolVersion is the current version of the gateway p2p protocol. -const ProtocolVersion = "1.5.4" - -var ( - errNoPeers = errors.New("no peers") - errNilDB = errors.New("cannot have a nil database as a dependency") -) - -// Gateway implements the modules.Gateway interface. -type Gateway struct { - db *sql.DB - listener net.Listener - myAddr modules.NetAddress - port string - - // handlers are the RPCs that the Gateway can handle. - // initRPCs are the RPCs that the Gateway calls upon connecting to a peer. - handlers map[rpcID]modules.RPCFunc - initRPCs map[string]modules.RPCFunc - - // blocklist are peers that the gateway shouldn't connect to. - // nodes is the set of all known nodes (i.e. potential peers). - // peers are the nodes that the gateway is currently connected to. - // peerTG is a special thread group for tracking peer connections, and will - // block shutdown until all peer connections have been closed out. The peer - // connections are put in a separate TG because of their unique - // requirements - they have the potential to live for the lifetime of the - // program, but also the potential to close early. Calling threads.OnStop - // for each peer could create a huge backlog of functions that do nothing - // (because most of the peers disconnected prior to shutdown). And they - // can't call threads.Add because they are potentially very long running - // and would block any threads.Flush() calls. 
So a second threadgroup is - // added which handles clean-shutdown for the peers, without blocking - // threads.Flush() calls. - blocklist map[string]struct{} - nodes map[modules.NetAddress]*node - peers map[modules.NetAddress]*peer - peerTG siasync.ThreadGroup - - // Utilities. - log *persist.Logger - mu sync.RWMutex - persist persistence - threads siasync.ThreadGroup - staticAlerter *modules.GenericAlerter - - // Unique ID - staticID gatewayID - - staticUseUPNP bool -} - -type gatewayID [8]byte - -// addToBlocklist adds addresses to the Gateway's blocklist. -func (g *Gateway) addToBlocklist(addresses []string) error { - // Add addresses to the blocklist and disconnect from them. - var err error - for _, addr := range addresses { - // Check Gateway peer map for address. - for peerAddr, peer := range g.peers { - // If the address corresponds with a peer, close the peer session - // and remove the peer from the peer map. - if peerAddr.Host() == addr { - err = modules.ComposeErrors(err, peer.sess.Close()) - delete(g.peers, peerAddr) - } - } - // Check Gateway node map for address. - for nodeAddr := range g.nodes { - // If the address corresponds with a node remove the node from the - // node map to prevent the node from being re-connected while - // looking for a replacement peer. - if nodeAddr.Host() == addr { - delete(g.nodes, nodeAddr) - } - } - - // Add address to the blocklist. - g.blocklist[addr] = struct{}{} - } - return modules.ComposeErrors(err, g.save()) -} - -// managedSleep will sleep for the given period of time. If the full time -// elapses, 'true' is returned. If the sleep is interrupted for shutdown, -// 'false' is returned. -func (g *Gateway) managedSleep(t time.Duration) (completed bool) { - select { - case <-time.After(t): - return true - case <-g.threads.StopChan(): - return false - } -} - -// Address returns the NetAddress of the Gateway. 
-func (g *Gateway) Address() modules.NetAddress { - g.mu.RLock() - defer g.mu.RUnlock() - return g.myAddr -} - -// AddToBlocklist adds addresses to the Gateway's blocklist. -func (g *Gateway) AddToBlocklist(addresses []string) error { - if err := g.threads.Add(); err != nil { - return err - } - defer g.threads.Done() - g.mu.Lock() - defer g.mu.Unlock() - return g.addToBlocklist(addresses) -} - -// Blocklist returns the Gateway's blocklist. -func (g *Gateway) Blocklist() ([]string, error) { - if err := g.threads.Add(); err != nil { - return nil, err - } - defer g.threads.Done() - g.mu.RLock() - defer g.mu.RUnlock() - - var blocklist []string - for addr := range g.blocklist { - blocklist = append(blocklist, addr) - } - return blocklist, nil -} - -// Close saves the state of the Gateway and stops its listener process. -func (g *Gateway) Close() error { - if err := g.threads.Stop(); err != nil { - return err - } - g.mu.Lock() - defer g.mu.Unlock() - return g.save() -} - -// DiscoverAddress discovers and returns the current public IP address of the -// gateway. Contrary to Address, DiscoverAddress is blocking and might take -// multiple minutes to return. A channel to cancel the discovery can be -// supplied optionally. If nil is supplied, a reasonable timeout will be used -// by default. -func (g *Gateway) DiscoverAddress(cancel <-chan struct{}) (net.IP, error) { - return g.managedLearnHostname(cancel) -} - -// ForwardPort adds a port mapping to the router. -func (g *Gateway) ForwardPort(port string) error { - if err := g.threads.Add(); err != nil { - return err - } - defer g.threads.Done() - return g.managedForwardPort(port) -} - -// RemoveFromBlocklist removes addresses from the Gateway's blocklist. 
-func (g *Gateway) RemoveFromBlocklist(addresses []string) error { - if err := g.threads.Add(); err != nil { - return err - } - defer g.threads.Done() - g.mu.Lock() - defer g.mu.Unlock() - - // Remove addresses from the blocklist - for _, addr := range addresses { - delete(g.blocklist, addr) - } - return g.save() -} - -// SetBlocklist sets the blocklist of the gateway. -func (g *Gateway) SetBlocklist(addresses []string) error { - if err := g.threads.Add(); err != nil { - return err - } - defer g.threads.Done() - g.mu.Lock() - defer g.mu.Unlock() - - // Reset the gateway blocklist since we are replacing the list with the new - // list of peers. - g.blocklist = make(map[string]struct{}) - - // If the length of addresses is 0 we are done, save and return. - if len(addresses) == 0 { - return g.save() - } - - // Add addresses to the blocklist and disconnect from them. - return g.addToBlocklist(addresses) -} - -// New returns an initialized Gateway. -func New(db *sql.DB, addr string, bootstrap bool, useUPNP bool, dir string) (*Gateway, error) { - // Check for the nil dependency. - if db == nil { - return nil, errNilDB - } - - g := &Gateway{ - db: db, - - handlers: make(map[rpcID]modules.RPCFunc), - initRPCs: make(map[string]modules.RPCFunc), - - blocklist: make(map[string]struct{}), - nodes: make(map[modules.NetAddress]*node), - peers: make(map[modules.NetAddress]*peer), - - staticAlerter: modules.NewAlerter("gateway"), - staticUseUPNP: useUPNP, - } - - // Set Unique GatewayID. - frand.Read(g.staticID[:]) - - // Create the logger. - var err error - g.log, err = persist.NewFileLogger(filepath.Join(dir, logFile)) - if err != nil { - return nil, err - } - // Establish the closing of the logger. - g.threads.AfterStop(func() { - if err := g.log.Close(); err != nil { - // The logger may or may not be working here, so use a Println - // instead. 
- fmt.Println("Failed to close the gateway logger:", err) - } - }) - g.log.Println("INFO: gateway created, started logging") - - // Establish that the peerTG must complete shutdown before the primary - // thread group completes shutdown. - g.threads.OnStop(func() { - err = g.peerTG.Stop() - if err != nil { - g.log.Println("ERROR: peerTG experienced errors while shutting down:", err) - } - }) - - // Register RPCs. - g.RegisterRPC("ShareNodes", g.shareNodes) - g.RegisterRPC("DiscoverIP", g.discoverPeerIP) - g.RegisterConnectCall("ShareNodes", g.requestNodes) - // Establish the de-registration of the RPCs. - g.threads.OnStop(func() { - g.UnregisterRPC("ShareNodes") - g.UnregisterRPC("DiscoverIP") - g.UnregisterConnectCall("ShareNodes") - }) - - // Load the old node list and gateway persistence. - if err := g.load(); err != nil { - return nil, modules.AddContext(err, "unable to load gateway") - } - - // Spawn the thread to periodically save the gateway. - go g.threadedSaveLoop() - - // Make sure that the gateway saves after shutdown. - g.threads.AfterStop(func() { - g.mu.Lock() - defer g.mu.Unlock() - if err := g.save(); err != nil { - g.log.Println("ERROR: unable to save gateway:", err) - } - }) - - // Add the bootstrap peers to the node list. - if bootstrap { - for _, addr := range modules.BootstrapPeers { - err := g.addNode(addr) - if err != nil && !modules.ContainsError(err, errNodeExists) { - g.log.Printf("WARN: failed to add the bootstrap node '%v': %v", addr, err) - } - } - } - - // Create the listener which will listen for new connections from peers. - permanentListenClosedChan := make(chan struct{}) - g.listener, err = net.Listen("tcp", addr) - if err != nil { - context := fmt.Sprintf("unable to create gateway tcp listener with address %v", addr) - return nil, modules.AddContext(err, context) - } - - // Automatically close the listener when g.threads.Stop() is called. 
- g.threads.OnStop(func() { - err := g.listener.Close() - if err != nil { - g.log.Println("WARN: closing the listener failed:", err) - } - <-permanentListenClosedChan - }) - - // Set the address and port of the gateway. - host, port, err := net.SplitHostPort(g.listener.Addr().String()) - g.port = port - if err != nil { - context := fmt.Sprintf("unable to split host and port from address %v", g.listener.Addr().String()) - return nil, modules.AddContext(err, context) - } - - if ip := net.ParseIP(host); ip.IsUnspecified() && ip != nil { - // If host is unspecified, set a dummy one for now. - host = "localhost" - } - - // Set myAddr equal to the address returned by the listener. It will be - // overwritten by threadedLearnHostname later on. - g.myAddr = modules.NetAddress(net.JoinHostPort(host, port)) - - // Spawn the peer connection listener. - go g.permanentListen(permanentListenClosedChan) - - // Spawn the peer manager and provide tools for ensuring clean shutdown. - peerManagerClosedChan := make(chan struct{}) - g.threads.OnStop(func() { - <-peerManagerClosedChan - }) - go g.permanentPeerManager(peerManagerClosedChan) - - // Spawn the node manager and provide tools for ensuring clean shutdown. - nodeManagerClosedChan := make(chan struct{}) - g.threads.OnStop(func() { - <-nodeManagerClosedChan - }) - go g.permanentNodeManager(nodeManagerClosedChan) - - // Spawn the node purger and provide tools for ensuring clean shutdown. - nodePurgerClosedChan := make(chan struct{}) - g.threads.OnStop(func() { - <-nodePurgerClosedChan - }) - go g.permanentNodePurger(nodePurgerClosedChan) - - // Spawn threads to take care of port forwarding and hostname discovery. - go g.threadedForwardPort(g.port) - go g.threadedLearnHostname() - - // Spawn thread to periodically check if the gateway is online. - go g.threadedOnlineCheck() - - return g, nil -} - -// threadedOnlineCheck periodically calls 'Online' to register the -// GatewayOffline alert. 
-func (g *Gateway) threadedOnlineCheck() { - if err := g.threads.Add(); err != nil { - return - } - defer g.threads.Done() - for { - select { - case <-g.threads.StopChan(): - return - case <-time.After(onlineCheckFrequency): - } - _ = g.Online() - } -} - -// enforce that Gateway satisfies the modules.Gateway interface. -var _ modules.Gateway = (*Gateway)(nil) diff --git a/modules/gateway/ip.go b/modules/gateway/ip.go deleted file mode 100644 index ed5a80e..0000000 --- a/modules/gateway/ip.go +++ /dev/null @@ -1,106 +0,0 @@ -package gateway - -import ( - "errors" - "io" - "net" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// discoverPeerIP is the handler for the discoverPeer RPC. It returns the -// public ip of the caller back to the caller. This allows for peer-to-peer ip -// discovery without centralized services. -func (g *Gateway) discoverPeerIP(conn modules.PeerConn) error { - conn.SetDeadline(time.Now().Add(connStdDeadline)) - host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) - if err != nil { - return modules.AddContext(err, "failed to split host from port") - } - e := types.NewEncoder(conn) - modules.WriteString(host, e) - return nil -} - -// managedIPFromPeers asks the peers the node is connected to for the node's -// public ip address. If not enough peers are available we wait a bit and try -// again. If no cancelation channel is provided, managedIPFromPeers will time -// out after timeoutIPDiscovery time. Otherwise it will time out when cancel is -// closed. The method might return with a short delay of -// peerDiscoveryRetryInterval. -func (g *Gateway) managedIPFromPeers(cancel <-chan struct{}) (string, error) { - // Choose default if cancel is nil. - var timeout <-chan time.Time - if cancel == nil { - timer := time.NewTimer(timeoutIPDiscovery) - defer timer.Stop() - timeout = timer.C - } - for { - // Check for shutdown signal or timeout. 
- select { - case <-g.peerTG.StopChan(): - return "", errors.New("interrupted by shutdown") - case <-timeout: - return "", errors.New("failed to discover ip in time") - case <-cancel: - return "", errors.New("failed to discover ip in time") - default: - } - // Get peers. - peers := g.Peers() - // Check if there are enough peers. Otherwise wait. - if len(peers) < minPeersForIPDiscovery { - g.managedSleep(peerDiscoveryRetryInterval) - continue - } - // Ask all the peers about our ip in parallel. - returnChan := make(chan string) - for _, peer := range peers { - go g.RPC(peer.NetAddress, "DiscoverIP", func(conn modules.PeerConn) error { - d := types.NewDecoder(io.LimitedReader{R: conn, N: 100}) - address := modules.ReadString(d) - err := d.Err() - if err != nil { - returnChan <- "" - return err - } - addr := net.ParseIP(address) - if addr == nil { - returnChan <- "" - return errors.New("failed to parse ip address") - } - returnChan <- addr.String() - return nil - }) - } - // Wait for their responses. - addresses := make(map[string]int) - successfulResponses := 0 - for i := 0; i < len(peers); i++ { - addr := <-returnChan - if addr != "" { - addresses[addr]++ - successfulResponses++ - } - } - // If there haven't been enough successful responses we wait some time. - if successfulResponses < minPeersForIPDiscovery { - g.managedSleep(peerDiscoveryRetryInterval) - continue - } - // If an address was returned by more than half the peers we consider - // it valid. - for addr, count := range addresses { - if count > successfulResponses/2 { - g.log.Println("INFO: ip successfully discovered using peers:", addr) - return addr, nil - } - } - // Otherwise we wait before trying again. 
- g.managedSleep(peerDiscoveryRetryInterval) - } -} diff --git a/modules/gateway/nodes.go b/modules/gateway/nodes.go deleted file mode 100644 index 0ce8e8f..0000000 --- a/modules/gateway/nodes.go +++ /dev/null @@ -1,361 +0,0 @@ -package gateway - -import ( - "encoding/binary" - "bytes" - "errors" - "io" - "net" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" - - "lukechampine.com/frand" -) - -var ( - errNodeExists = errors.New("node already added") - errNoNodes = errors.New("no nodes in the node list") - errOurAddress = errors.New("can't add our own address") - errPeerGenesisID = errors.New("peer has different genesis ID") -) - -// A node represents a potential peer on the Sia network. -type node struct { - NetAddress modules.NetAddress `json:"netaddress"` - WasOutboundPeer bool `json:"wasoutboundpeer"` -} - -// addNode adds an address to the set of nodes on the network. -func (g *Gateway) addNode(addr modules.NetAddress) error { - if addr == g.myAddr { - return errOurAddress - } else if _, exists := g.nodes[addr]; exists { - return errNodeExists - } else if addr.IsStdValid() != nil { - return errors.New("address is not valid: " + string(addr)) - } else if net.ParseIP(addr.Host()) == nil { - return errors.New("address must be an IP address: " + string(addr)) - } - g.nodes[addr] = &node{ - NetAddress: addr, - WasOutboundPeer: false, - } - return nil -} - -// staticPingNode verifies that there is a reachable node at the provided address -// by performing the Sia gateway handshake protocol. -func (g *Gateway) staticPingNode(addr modules.NetAddress) (err error) { - // Ping the untrusted node to see whether or not there's actually a - // reachable node at the provided address. - conn, err := g.staticDial(addr) - if err != nil { - return err - } - defer func() { - err = modules.ComposeErrors(err, conn.Close()) - }() - - // Read the node's version. 
- remoteVersion, err := connectVersionHandshake(conn, ProtocolVersion) - if err != nil { - return err - } - - if err := acceptableVersion(remoteVersion); err != nil { - // Return an error so that bad version peers are purged. - return err - } - - // Send our header. - // NOTE: since we don't intend to complete the connection, we can send an - // inaccurate NetAddress. - ourHeader := sessionHeader{ - GenesisID: modules.GenesisID, - UniqueID: g.staticID, - NetAddress: modules.NetAddress(conn.LocalAddr().String()), - } - if err := exchangeOurHeader(conn, ourHeader); err != nil { - return err - } - - // Read remote header. - var remoteHeader sessionHeader - d := types.NewDecoder(io.LimitedReader{R: conn, N: maxEncodedSessionHeaderSize}) - remoteHeader.DecodeFrom(d) - if err := d.Err(); err != nil { - return modules.AddContext(err, "failed to read remote header") - } else if err := acceptableSessionHeader(ourHeader, remoteHeader, conn.RemoteAddr().String()); err != nil { - return err - } - - // Send special rejection string. - e := types.NewEncoder(conn) - modules.WriteString(modules.StopResponse, e) - - return nil -} - -// removeNode will remove a node from the gateway. -func (g *Gateway) removeNode(addr modules.NetAddress) error { - if _, exists := g.nodes[addr]; !exists { - return errors.New("no record of that node") - } - delete(g.nodes, addr) - return nil -} - -// randomNode returns a random node from the gateway. An error can be returned -// if there are no nodes in the node list. -func (g *Gateway) randomNode() (modules.NetAddress, error) { - if len(g.nodes) == 0 { - return "", errNoPeers - } - - // Select a random peer. Note that the algorithm below is roughly linear in - // the number of nodes known by the gateway, and this number can approach - // every node on the network. If the network gets large, this algorithm - // will either need to be refactored, or more likely a cap on the size of - // g.nodes will need to be added. 
- r := frand.Intn(len(g.nodes)) - for node := range g.nodes { - if r <= 0 { - return node, nil - } - r-- - } - return "", errNoPeers -} - -// shareNodes is the receiving end of the ShareNodes RPC. It writes up to 10 -// randomly selected nodes to the caller. -func (g *Gateway) shareNodes(conn modules.PeerConn) error { - conn.SetDeadline(time.Now().Add(connStdDeadline)) - remoteNA := modules.NetAddress(conn.RemoteAddr().String()) - - // Assemble a list of nodes to send to the peer. - var nodes []modules.NetAddress - func() { - g.mu.RLock() - defer g.mu.RUnlock() - - // Gather candidates for sharing. - gnodes := make([]modules.NetAddress, 0, len(g.nodes)) - for node := range g.nodes { - // Don't share local peers with remote peers. That means that if 'node' - // is loopback, it will only be shared if the remote peer is also - // loopback. And if 'node' is private, it will only be shared if the - // remote peer is either the loopback or is also private. - if node.IsLoopback() && !remoteNA.IsLoopback() { - continue - } - if node.IsLocal() && !remoteNA.IsLocal() { - continue - } - gnodes = append(gnodes, node) - } - - // Iterate through the random permutation of nodes and select the - // desirable ones. - for _, i := range frand.Perm(len(gnodes)) { - nodes = append(nodes, gnodes[i]) - if uint64(len(nodes)) == maxSharedNodes { - break - } - } - }() - - // Send the list. - var buf bytes.Buffer - e := types.NewEncoder(&buf) - e.WriteUint64(0) - e.WritePrefix(len(nodes)) - for _, n := range nodes { - e.WriteString(string(n)) - } - e.Flush() - b := buf.Bytes() - binary.LittleEndian.PutUint64(b[:8], uint64(len(b) - 8)) - _, err := conn.Write(b) - - return err -} - -// requestNodes is the calling end of the ShareNodes RPC. 
-func (g *Gateway) requestNodes(conn modules.PeerConn) error { - conn.SetDeadline(time.Now().Add(connStdDeadline)) - - d := types.NewDecoder(io.LimitedReader{R: conn, N: int64(maxSharedNodes * modules.MaxEncodedNetAddressLength)}) - num := modules.ReadUint64(d) - if err := d.Err(); err != nil { - return err - } - nodes := make([]modules.NetAddress, 0, num) - for num > 0 { - node := modules.NetAddress(d.ReadString()) - if err := d.Err(); err != nil { - return err - } - nodes = append(nodes, node) - num-- - } - - g.mu.Lock() - changed := false - for _, node := range nodes { - err := g.addNode(node) - if err != nil && !modules.ContainsError(err, errNodeExists) && !modules.ContainsError(err, errOurAddress) { - g.log.Printf("WARN: peer '%v' sent the invalid addr '%v'", conn.RPCAddr(), node) - } - if err == nil { - changed = true - } - } - if changed { - err := g.save() - if err != nil { - g.log.Println("ERROR: unable to save new nodes added to the gateway:", err) - } - } - g.mu.Unlock() - return nil -} - -// permanentNodePurger is a thread that runs throughout the lifetime of the -// gateway, purging unconnectable nodes from the node list in a sustainable -// way. -func (g *Gateway) permanentNodePurger(closeChan chan struct{}) { - defer close(closeChan) - - for { - // Choose an amount of time to wait before attempting to prune a node. - // Nodes will occasionally go offline for some time, which can even be - // days. We don't want to too aggressively prune nodes with low-moderate - // uptime, as they are still useful to the network. - // - // But if there are a lot of nodes, we want to make sure that the node - // list does not become saturated with inaccessible / offline nodes. - // Pruning happens a lot faster when there are a lot of nodes in the - // gateway. - // - // This value is a ratelimit which tries to keep the nodes list in the - // gateawy healthy. 
A more complex algorithm might adjust this number - // according to the percentage of prune attempts that are successful - // (decrease prune frequency if most nodes in the database are online, - // increase prune frequency if more nodes in the database are offline). - waitTime := nodePurgeDelay - g.mu.RLock() - nodeCount := len(g.nodes) - g.mu.RUnlock() - if nodeCount > quickPruneListLen { - waitTime = fastNodePurgeDelay - } - - // Sleep as a purge ratelimit. - select { - case <-time.After(waitTime): - case <-g.threads.StopChan(): - // The gateway is shutting down, close out the thread. - return - } - - // Get a random node for scanning. - g.mu.RLock() - numNodes := len(g.nodes) - node, err := g.randomNode() - g.mu.RUnlock() - if modules.ContainsError(err, errNoNodes) { - // errNoNodes is a common error that will be resolved by the - // bootstrap process. - continue - } else if err != nil { - // Unusual error, create a logging statement. - g.log.Println("ERROR: could not pick a random node for uptime check:", err) - continue - } - if numNodes <= pruneNodeListLen { - // There are not enough nodes in the gateway - pruning more is - // probably a bad idea, and may affect the user's ability to - // connect to the network in the future. - continue - } - // Check whether this node is already a peer. If so, no need to dial - // them. - g.mu.RLock() - _, exists := g.peers[node] - g.mu.RUnlock() - if exists { - continue - } - - // Try connecting to the random node. If the node is not reachable, - // remove them from the node list. - // - // NOTE: an error may be returned if the dial is canceled partway - // through, which would cause the node to be pruned even though it may - // be a good node. Because nodes are plentiful, this is an acceptable - // bug. - if err = g.staticPingNode(node); err != nil { - g.mu.Lock() - if len(g.nodes) > pruneNodeListLen { - // Check if the number of nodes is still above the threshold. 
- g.removeNode(node) - } - g.mu.Unlock() - } - } -} - -// permanentNodeManager tries to keep the Gateway's node list healthy. As long -// as the Gateway has fewer than healthyNodeListLen nodes, it asks a random -// peer for more nodes. It also continually pings nodes in order to establish -// their connectivity. Unresponsive nodes are aggressively removed. -func (g *Gateway) permanentNodeManager(closeChan chan struct{}) { - defer close(closeChan) - - for { - // Wait 5 seconds so that a controlled number of node requests are made - // to peers. - select { - case <-time.After(nodeListDelay): - case <-g.threads.StopChan(): - // Gateway is shutting down, close the thread. - return - } - - g.mu.RLock() - numNodes := len(g.nodes) - peer, err := g.randomOutboundPeer() - g.mu.RUnlock() - if modules.ContainsError(err, errNoPeers) { - // errNoPeers is a common and expected error, there's no need to - // log it. - continue - } else if err != nil { - g.log.Println("ERROR: could not fetch a random peer:", err) - continue - } - - // Determine whether there are a satisfactory number of nodes in the - // nodelist. If there are not, use the random peer from earlier to - // expand the node list. - if numNodes < healthyNodeListLen { - err := g.managedRPC(peer, "ShareNodes", g.requestNodes) - if err != nil { - continue - } - } else { - // There are enough nodes in the gateway, no need to check for more - // every 5 seconds. Wait a while before checking again. - select { - case <-time.After(wellConnectedDelay): - case <-g.threads.StopChan(): - // Gateway is shutting down, close the thread. 
- return - } - } - } -} diff --git a/modules/gateway/peers.go b/modules/gateway/peers.go deleted file mode 100644 index bcbef7e..0000000 --- a/modules/gateway/peers.go +++ /dev/null @@ -1,615 +0,0 @@ -package gateway - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "time" - - "github.com/mike76-dev/sia-satellite/internal/build" - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" - - "lukechampine.com/frand" -) - -var ( - errPeerExists = errors.New("already connected to this peer") - errPeerRejectedConn = errors.New("peer rejected connection") - - // ErrPeerNotConnected is returned when trying to disconnect from a peer - // that the gateway is not connected to. - ErrPeerNotConnected = errors.New("not connected to that node") -) - -// insufficientVersionError indicates a peer's version is insufficient. -type insufficientVersionError string - -// Error implements the error interface for insufficientVersionError. -func (s insufficientVersionError) Error() string { - return "unacceptable version: " + string(s) -} - -// invalidVersionError indicates a peer's version is not a valid version number. -type invalidVersionError string - -// Error implements the error interface for invalidVersionError. -func (s invalidVersionError) Error() string { - return "invalid version: " + string(s) -} - -type peer struct { - modules.Peer - sess streamSession -} - -// sessionHeader is sent after the initial version exchange. It prevents peers -// on different blockchains from connecting to each other, and prevents the -// gateway from connecting to itself. -type sessionHeader struct { - GenesisID types.BlockID - UniqueID gatewayID - NetAddress modules.NetAddress -} - -// EncodeTo implements the types.EncoderTo interface. 
-func (sh *sessionHeader) EncodeTo(e *types.Encoder) { - var buf bytes.Buffer - enc := types.NewEncoder(&buf) - enc.Write(sh.GenesisID[:]) - enc.Write(sh.UniqueID[:]) - enc.WriteString(string(sh.NetAddress)) - enc.Flush() - b := buf.Bytes() - e.WriteBytes(b) -} - -// DecodeFrom implements the types.DecoderFrom interface. -func (sh *sessionHeader) DecodeFrom(d *types.Decoder) { - _ = d.ReadUint64() // Length prefix. - d.Read(sh.GenesisID[:]) - d.Read(sh.UniqueID[:]) - sh.NetAddress = modules.NetAddress(d.ReadString()) -} - -func (p *peer) open() (modules.PeerConn, error) { - conn, err := p.sess.Open() - if err != nil { - return nil, err - } - return &peerConn{conn, p.NetAddress}, nil -} - -func (p *peer) accept() (modules.PeerConn, error) { - conn, err := p.sess.Accept() - if err != nil { - return nil, err - } - return &peerConn{conn, p.NetAddress}, nil -} - -// addPeer adds a peer to the Gateway's peer list, spawns a listener thread to -// handle its requests and increments the remotePeers accordingly. -func (g *Gateway) addPeer(p *peer) { - g.peers[p.NetAddress] = p - go g.threadedListenPeer(p) -} - -// callInitRPCs calls the rpcs that are registered to be called upon connecting -// to a peer. -func (g *Gateway) callInitRPCs(addr modules.NetAddress) { - for name, fn := range g.initRPCs { - go func(name string, fn modules.RPCFunc) { - if g.threads.Add() != nil { - return - } - defer g.threads.Done() - - g.managedRPC(addr, name, fn) - }(name, fn) - } -} - -// randomOutboundPeer returns a random outbound peer. -func (g *Gateway) randomOutboundPeer() (modules.NetAddress, error) { - // Get the list of outbound peers. - var addrs []modules.NetAddress - for addr, peer := range g.peers { - if peer.Inbound { - continue - } - addrs = append(addrs, addr) - } - if len(addrs) == 0 { - return "", errNoPeers - } - - // Of the remaining options, select one at random. - return addrs[frand.Intn(len(addrs))], nil -} - -// permanentListen handles incoming connection requests. 
If the connection is -// accepted, the peer will be added to the Gateway's peer list. -func (g *Gateway) permanentListen(closeChan chan struct{}) { - // Signal that the permanentListen thread has completed upon returning. - defer close(closeChan) - - for { - conn, err := g.listener.Accept() - if err != nil { - return - } - - go g.threadedAcceptConn(conn) - - // Sleep after each accept. This limits the rate at which the Gateway - // will accept new connections. The intent here is to prevent new - // incoming connections from kicking out old ones before they have a - // chance to request additional nodes. - select { - case <-time.After(acceptInterval): - case <-g.threads.StopChan(): - return - } - } -} - -// threadedAcceptConn adds a connecting node as a peer. -func (g *Gateway) threadedAcceptConn(conn net.Conn) { - if g.threads.Add() != nil { - conn.Close() - return - } - defer g.threads.Done() - conn.SetDeadline(time.Now().Add(connStdDeadline)) - - addr := modules.NetAddress(conn.RemoteAddr().String()) - - g.mu.RLock() - _, exists := g.blocklist[addr.Host()] - g.mu.RUnlock() - if exists { - conn.Close() - return - } - remoteVersion, err := acceptVersionHandshake(conn, ProtocolVersion) - if err != nil { - conn.Close() - return - } - - if err = acceptableVersion(remoteVersion); err == nil { - err = g.managedAcceptConnPeer(conn, remoteVersion) - } - if err != nil { - conn.Close() - return - } - - // Handshake successful, remove the deadline. - conn.SetDeadline(time.Time{}) -} - -// acceptableSessionHeader returns an error if remoteHeader indicates a peer -// that should not be connected to. 
-func acceptableSessionHeader(ourHeader, remoteHeader sessionHeader, remoteAddr string) error { - if remoteHeader.GenesisID != ourHeader.GenesisID { - return errPeerGenesisID - } else if remoteHeader.UniqueID == ourHeader.UniqueID { - return errOurAddress - } else if err := remoteHeader.NetAddress.IsStdValid(); err != nil { - return modules.AddContext(err, "invalid remote address") - } - return nil -} - -// managedAcceptConnPeer accepts connection requests from peers >= v1.3.1. -// The requesting peer is added as a node and a peer. The peer is only added if -// a nil error is returned. -func (g *Gateway) managedAcceptConnPeer(conn net.Conn, remoteVersion string) error { - // Perform header handshake. - g.mu.RLock() - ourHeader := sessionHeader{ - GenesisID: modules.GenesisID, - UniqueID: g.staticID, - NetAddress: g.myAddr, - } - g.mu.RUnlock() - - remoteHeader, err := exchangeRemoteHeader(conn, ourHeader) - if err != nil { - return err - } - if err := exchangeOurHeader(conn, ourHeader); err != nil { - return err - } - - // Get the remote address on which the connecting peer is listening on. - // This means we need to combine the incoming connections ip address with - // the announced open port of the peer. - remoteIP := modules.NetAddress(conn.RemoteAddr().String()).Host() - remotePort := remoteHeader.NetAddress.Port() - remoteAddr := modules.NetAddress(net.JoinHostPort(remoteIP, remotePort)) - - // Accept the peer. - peer := &peer{ - Peer: modules.Peer{ - Inbound: true, - // NOTE: local may be true even if the supplied NetAddress is not - // actually reachable. - Local: remoteAddr.IsLocal(), - // Ignoring claimed IP address (which should be == to the socket address) - // by the host but keeping note of the port number so we can call back. - NetAddress: remoteAddr, - Version: remoteVersion, - }, - sess: newServerStream(conn, remoteVersion), - } - g.mu.Lock() - g.acceptPeer(peer) - g.mu.Unlock() - - // Attempt to ping the supplied address. 
If successful, we will add - // remoteHeader.NetAddress to our node list after accepting the peer. We - // do this in a goroutine so that we can begin communicating with the peer - // immediately. - go func() { - err := g.staticPingNode(remoteAddr) - if err == nil { - g.mu.Lock() - g.addNode(remoteAddr) - g.mu.Unlock() - } - }() - - return nil -} - -// acceptPeer makes room for the peer if necessary by kicking out existing -// peers, then adds the peer to the peer list. -func (g *Gateway) acceptPeer(p *peer) { - // If we are not fully connected, add the peer without kicking any out. - if len(g.peers) < fullyConnectedThreshold { - g.addPeer(p) - return - } - - // Select a peer to kick. Outbound peers and local peers are not - // available to be kicked. - var addrs, preferredAddrs []modules.NetAddress - for addr, peer := range g.peers { - // Do not kick outbound peers or local peers. - if !peer.Inbound || peer.Local { - continue - } - - // Prefer kicking a peer with the same hostname. - if addr.Host() == p.NetAddress.Host() { - preferredAddrs = append(preferredAddrs, addr) - continue - } - addrs = append(addrs, addr) - } - if len(preferredAddrs) > 0 { - // If there are preferredAddrs we choose randomly from them. - addrs = preferredAddrs - } - if len(addrs) == 0 { - // There is nobody suitable to kick, therefore do not kick anyone. - g.addPeer(p) - return - } - - // Of the remaining options, select one at random. - kick := addrs[frand.Intn(len(addrs))] - - g.peers[kick].sess.Close() - delete(g.peers, kick) - g.log.Printf("INFO: disconnected from %v to make room for %v\n", kick, p.NetAddress) - g.addPeer(p) -} - -// acceptableVersion returns an error if the version is unacceptable. 
-func acceptableVersion(version string) error { - if !build.IsVersion(version) { - return invalidVersionError(version) - } - if build.VersionCmp(version, minimumAcceptablePeerVersion) < 0 { - return insufficientVersionError(version) - } - return nil -} - -// connectVersionHandshake performs the version handshake and should be called -// on the side making the connection request. The remote version is only -// returned if err == nil. -func connectVersionHandshake(conn net.Conn, version string) (remoteVersion string, err error) { - // Send our version. - e := types.NewEncoder(conn) - modules.WriteString(version, e) - - // Read remote version. - d := types.NewDecoder(io.LimitedReader{R: conn, N: build.MaxEncodedVersionLength}) - remoteVersion = modules.ReadString(d) - if err := d.Err(); err != nil { - return "", modules.AddContext(err, "failed to read remote version") - } - - // Check that their version is acceptable. - if remoteVersion == "reject" { - return "", errPeerRejectedConn - } - if err := acceptableVersion(remoteVersion); err != nil { - return "", err - } - - return remoteVersion, nil -} - -// acceptVersionHandshake performs the version handshake and should be -// called on the side accepting a connection request. The remote version is -// only returned if err == nil. -func acceptVersionHandshake(conn net.Conn, version string) (remoteVersion string, err error) { - // Read remote version. - d := types.NewDecoder(io.LimitedReader{R: conn, N: build.MaxEncodedVersionLength}) - remoteVersion = modules.ReadString(d) - if err := d.Err(); err != nil { - return "", modules.AddContext(err, "failed to read remote version") - } - - // Check that their version is acceptable. - e := types.NewEncoder(conn) - if err := acceptableVersion(remoteVersion); err != nil { - modules.WriteString("reject", e) - return "", err - } - - // Send our version. 
- modules.WriteString(version, e) - - return remoteVersion, nil -} - -// exchangeOurHeader writes ourHeader and reads the remote's error response. -func exchangeOurHeader(conn net.Conn, ourHeader sessionHeader) error { - // Send our header. - e := types.NewEncoder(conn) - ourHeader.EncodeTo(e) - e.Flush() - - // Read remote response. - d := types.NewDecoder(io.LimitedReader{R: conn, N: 100}) - response := modules.ReadString(d) - if err := d.Err(); err != nil { - return modules.AddContext(err, "failed to read header acceptance") - } else if response == modules.StopResponse { - return errors.New("peer did not want a connection") - } else if response != modules.AcceptResponse { - return fmt.Errorf("peer rejected our header: %v", response) - } - - return nil -} - -// exchangeRemoteHeader reads the remote header and writes an error response. -func exchangeRemoteHeader(conn net.Conn, ourHeader sessionHeader) (sessionHeader, error) { - // Read remote header. - var remoteHeader sessionHeader - d := types.NewDecoder(io.LimitedReader{R: conn, N: maxEncodedSessionHeaderSize}) - remoteHeader.DecodeFrom(d) - if err := d.Err(); err != nil { - return sessionHeader{}, modules.AddContext(err, "failed to read remote header") - } - - // Validate remote header and write acceptance or rejection. - e := types.NewEncoder(conn) - err := acceptableSessionHeader(ourHeader, remoteHeader, conn.RemoteAddr().String()) - if err != nil { - modules.WriteString(err.Error(), e) - return sessionHeader{}, modules.AddContext(err, "peer's header was not acceptable") - } - modules.WriteString(modules.AcceptResponse, e) - - return remoteHeader, nil -} - -// managedConnectPeer connects to peers >= v1.3.1. The peer is added as a -// node and a peer. The peer is only added if a nil error is returned. -func (g *Gateway) managedConnectPeer(conn net.Conn, remoteVersion string, remoteAddr modules.NetAddress) error { - // Perform header handshake. 
- g.mu.RLock() - ourHeader := sessionHeader{ - GenesisID: modules.GenesisID, - UniqueID: g.staticID, - NetAddress: g.myAddr, - } - g.mu.RUnlock() - - if err := exchangeOurHeader(conn, ourHeader); err != nil { - return err - } else if _, err := exchangeRemoteHeader(conn, ourHeader); err != nil { - return err - } - return nil -} - -// managedConnect establishes a persistent connection to a peer, and adds it to -// the Gateway's peer list. -func (g *Gateway) managedConnect(addr modules.NetAddress) error { - // Perform verification on the input address. - g.mu.RLock() - gaddr := g.myAddr - g.mu.RUnlock() - if addr == gaddr { - err := errors.New("can't connect to our own address") - return err - } - if err := addr.IsStdValid(); err != nil { - err := errors.New("can't connect to invalid address") - return err - } - if net.ParseIP(addr.Host()) == nil { - err := errors.New("address must be an IP address") - return err - } - if _, exists := g.blocklist[addr.Host()]; exists { - err := errors.New("can't connect to blocklisted address") - return err - } - g.mu.RLock() - _, exists := g.peers[addr] - g.mu.RUnlock() - if exists { - return errPeerExists - } - - // Dial the peer and perform peer initialization. - conn, err := g.staticDial(addr) - if err != nil { - return err - } - - // Perform peer initialization. - remoteVersion, err := connectVersionHandshake(conn, ProtocolVersion) - if err != nil { - conn.Close() - return err - } - - if err = acceptableVersion(remoteVersion); err == nil { - err = g.managedConnectPeer(conn, remoteVersion, addr) - } - if err != nil { - conn.Close() - return err - } - - // Connection successful, clear the timeout as to maintain a persistent - // connection to this peer. - conn.SetDeadline(time.Time{}) - - // Add the peer. 
- g.mu.Lock() - defer g.mu.Unlock() - - g.addPeer(&peer{ - Peer: modules.Peer{ - Inbound: false, - Local: addr.IsLocal(), - NetAddress: addr, - Version: remoteVersion, - }, - sess: newClientStream(conn, remoteVersion), - }) - g.addNode(addr) - g.nodes[addr].WasOutboundPeer = true - - if err := g.save(); err != nil { - g.log.Println("ERROR: unable to save new outbound peer to gateway:", err) - } - - // call initRPCs - g.callInitRPCs(addr) - - return nil -} - -// Connect establishes a persistent connection to a peer, and adds it to the -// Gateway's peer list. -func (g *Gateway) Connect(addr modules.NetAddress) error { - if err := g.threads.Add(); err != nil { - return err - } - defer g.threads.Done() - return g.managedConnect(addr) -} - -// Disconnect terminates a connection to a peer and removes it from the -// Gateway's peer list. -func (g *Gateway) Disconnect(addr modules.NetAddress) error { - if err := g.threads.Add(); err != nil { - return err - } - defer g.threads.Done() - - g.mu.RLock() - p, exists := g.peers[addr] - g.mu.RUnlock() - if !exists { - err := ErrPeerNotConnected - return err - } - - p.sess.Close() - g.mu.Lock() - // Peer is removed from the peer list as well as the node list, to prevent - // the node from being re-connected while looking for a replacement peer. - delete(g.peers, addr) - delete(g.nodes, addr) - g.mu.Unlock() - - g.log.Println("INFO: disconnected from peer", addr) - return nil -} - -// ConnectManual is a wrapper for the Connect function. It is specifically used -// if a user wants to connect to a node manually. This also removes the node -// from the blocklist. -func (g *Gateway) ConnectManual(addr modules.NetAddress) error { - g.mu.Lock() - var err error - if _, exists := g.blocklist[addr.Host()]; exists { - delete(g.blocklist, addr.Host()) - err = g.save() - } - g.mu.Unlock() - return modules.ComposeErrors(err, g.Connect(addr)) -} - -// DisconnectManual is a wrapper for the Disconnect function. 
It is -// specifically used if a user wants to connect to a node manually. This also -// adds the node to the blocklist. -func (g *Gateway) DisconnectManual(addr modules.NetAddress) error { - err := g.Disconnect(addr) - if err == nil { - g.mu.Lock() - g.blocklist[addr.Host()] = struct{}{} - err = g.save() - g.mu.Unlock() - } - return err -} - -// Online returns true if the node is connected to the internet. -func (g *Gateway) Online() (online bool) { - defer func() { - if online { - g.staticAlerter.UnregisterAlert(modules.AlertIDGatewayOffline) - } else { - g.staticAlerter.RegisterAlert(modules.AlertIDGatewayOffline, AlertMSGGatewayOffline, "", modules.SeverityWarning) - } - }() - - g.mu.RLock() - defer g.mu.RUnlock() - for _, p := range g.peers { - if !p.Local { - return true - } - } - return false -} - -// Peers returns the addresses currently connected to the Gateway. -func (g *Gateway) Peers() []modules.Peer { - g.mu.RLock() - defer g.mu.RUnlock() - var peers []modules.Peer - for _, p := range g.peers { - peers = append(peers, p.Peer) - } - return peers -} diff --git a/modules/gateway/peersmanager.go b/modules/gateway/peersmanager.go deleted file mode 100644 index f16dfee..0000000 --- a/modules/gateway/peersmanager.go +++ /dev/null @@ -1,166 +0,0 @@ -package gateway - -import ( - "github.com/mike76-dev/sia-satellite/modules" - - "lukechampine.com/frand" -) - -// managedPeerManagerConnect is a blocking function which tries to connect to -// the input address as a peer. -func (g *Gateway) managedPeerManagerConnect(addr modules.NetAddress) { - err := g.managedConnect(addr) - if modules.ContainsError(err, errPeerExists) { - // This peer is already connected to us. Safety around the - // outbound peers relates to the fact that we have picked out - // the outbound peers instead of allow the attacker to pick out - // the peers for us. Because we have made the selection, it is - // okay to set the peer as an outbound peer. 
- // - // The nodelist size check ensures that an attacker can't flood - // a new node with a bunch of inbound requests. Doing so would - // result in a nodelist that's entirely full of attacker nodes. - // There's not much we can do about that anyway, but at least - // we can hold off making attacker nodes 'outbound' peers until - // our nodelist has had time to fill up naturally. - g.mu.Lock() - p, exists := g.peers[addr] - if exists { - // Have to check it exists because we released the lock, a - // race condition could mean that the peer was disconnected - // before this code block was reached. - p.Inbound = false - if n, ok := g.nodes[p.NetAddress]; ok && !n.WasOutboundPeer { - n.WasOutboundPeer = true - g.nodes[n.NetAddress] = n - } - g.callInitRPCs(p.NetAddress) - } - g.mu.Unlock() - } else if err != nil { - // Remove the node, but only if there are enough nodes in the node list. - g.mu.Lock() - if len(g.nodes) > pruneNodeListLen { - g.removeNode(addr) - } - g.mu.Unlock() - } -} - -// numOutboundPeers returns the number of outbound peers in the gateway. -func (g *Gateway) numOutboundPeers() int { - n := 0 - for _, p := range g.peers { - if !p.Inbound { - n++ - } - } - return n -} - -// permanentPeerManager tries to keep the Gateway well-connected. As long as -// the Gateway is not well-connected, it tries to connect to random nodes. -func (g *Gateway) permanentPeerManager(closedChan chan struct{}) { - // Send a signal upon shutdown. - defer close(closedChan) - - // permanentPeerManager will attempt to connect to peers asynchronously, - // such that multiple connection attempts can be open at once, but a - // limited number. - connectionLimiterChan := make(chan struct{}, maxConcurrentOutboundPeerRequests) - - for { - // Fetch the set of nodes to try. 
- g.mu.RLock() - nodes := g.buildPeerManagerNodeList() - g.mu.RUnlock() - if len(nodes) == 0 { - if !g.managedSleep(noNodesDelay) { - return - } - continue - } - - for _, addr := range nodes { - // Break as soon as we have enough outbound peers. - g.mu.RLock() - numOutboundPeers := g.numOutboundPeers() - isOutboundPeer := g.peers[addr] != nil && !g.peers[addr].Inbound - g.mu.RUnlock() - if numOutboundPeers >= wellConnectedThreshold { - if !g.managedSleep(wellConnectedDelay) { - return - } - break - } - if isOutboundPeer { - // Skip current outbound peers. - if !g.managedSleep(acquiringPeersDelay) { - return - } - continue - } - - // We need at least some of our outbound peers to be remote peers. If - // we already have reached a certain threshold of outbound peers and - // this peer is a local peer, do not consider it for an outbound peer. - // Sleep briefly to prevent the gateway from hogging the CPU if all - // peers are local. - if numOutboundPeers >= maxLocalOutboundPeers && addr.IsLocal() { - if !g.managedSleep(unwantedLocalPeerDelay) { - return - } - continue - } - - // Try connecting to that peer in a goroutine. Do not block unless - // there are currently 3 or more peer connection attempts open at once. - // Before spawning the thread, make sure that there is enough room by - // throwing a struct into the buffered channel. - connectionLimiterChan <- struct{}{} - go func(addr modules.NetAddress) { - // After completion, take the struct out of the channel so that the - // next thread may proceed. - defer func() { - <-connectionLimiterChan - }() - - if err := g.threads.Add(); err != nil { - return - } - defer g.threads.Done() - // peerManagerConnect will handle all of its own logging. - g.managedPeerManagerConnect(addr) - }(addr) - - // Wait a bit before trying the next peer. The peer connections are - // non-blocking, so they should be spaced out to avoid spinning up an - // uncontrolled number of threads and therefore peer connections. 
- if !g.managedSleep(acquiringPeersDelay) { - return - } - } - } -} - -// buildPeerManagerNodeList returns the gateway's node list in the order that -// permanentPeerManager should attempt to connect to them. -func (g *Gateway) buildPeerManagerNodeList() []modules.NetAddress { - // Flatten the node map, inserting in random order. - nodes := make([]modules.NetAddress, len(g.nodes)) - perm := frand.Perm(len(nodes)) - for _, node := range g.nodes { - nodes[perm[0]] = node.NetAddress - perm = perm[1:] - } - - // swap the outbound nodes to the front of the list - numOutbound := 0 - for i, node := range nodes { - if g.nodes[node].WasOutboundPeer { - nodes[numOutbound], nodes[i] = nodes[i], nodes[numOutbound] - numOutbound++ - } - } - return nodes -} diff --git a/modules/gateway/persist.go b/modules/gateway/persist.go deleted file mode 100644 index b5c0f87..0000000 --- a/modules/gateway/persist.go +++ /dev/null @@ -1,153 +0,0 @@ -package gateway - -import ( - "time" - - "github.com/mike76-dev/sia-satellite/modules" -) - -const ( - // logFile is the name of the log file. - logFile = "gateway.log" -) - -type ( - // persist contains all of the persistent gateway data. - persistence struct { - RouterURL string - - // Blocklisted IPs. - Blocklist []string - } -) - -// load loads the Gateway's persistent data from disk. -func (g *Gateway) load() error { - // Load nodes. - nodeRows, err := g.db.Query("SELECT address, outbound FROM gw_nodes") - if err != nil { - return err - } - - for nodeRows.Next() { - var address modules.NetAddress - var outbound bool - if err := nodeRows.Scan(&address, &outbound); err != nil { - g.log.Println("ERROR: unable to retrieve node:", err) - continue - } - n := &node{ - NetAddress: address, - WasOutboundPeer: outbound, - } - g.nodes[address] = n - } - nodeRows.Close() - - // Load Gateway persistence. 
- g.db.QueryRow("SELECT router_url FROM gw_url").Scan(&g.persist.RouterURL) - rows, err := g.db.Query("SELECT ip FROM gw_blocklist") - if err != nil { - return err - } - - for rows.Next() { - var ip string - if err := rows.Scan(&ip); err != nil { - g.log.Println("ERROR: unable to retrieve blocklist IP:", err) - continue - } - g.persist.Blocklist = append(g.persist.Blocklist, ip) - } - rows.Close() - - // Create map from blocklist. - for _, ip := range g.persist.Blocklist { - g.blocklist[ip] = struct{}{} - } - - return nil -} - -// save stores the Gateway's persistent data in the database. -func (g *Gateway) save() error { - // Save Gateway persistence. - _, err := g.db.Exec("UPDATE gw_url SET router_url = ?", g.persist.RouterURL) - if err != nil { - return err - } - - tx, err := g.db.Begin() - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM gw_blocklist") - if err != nil { - tx.Rollback() - return err - } - for _, ip := range g.persist.Blocklist { - _, err = tx.Exec("INSERT INTO gw_blocklist (ip) VALUES (?)", ip) - if err != nil { - tx.Rollback() - return err - } - } - if err := tx.Commit(); err != nil { - g.log.Println("ERROR: unable to save Gateway persistence:", err) - return err - } - - // Save nodes. - tx, err = g.db.Begin() - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM gw_nodes") - if err != nil { - tx.Rollback() - return err - } - for _, n := range g.nodes { - _, err = tx.Exec(` - INSERT INTO gw_nodes (address, outbound) - VALUES (?, ?) - `, n.NetAddress, n.WasOutboundPeer) - if err != nil { - tx.Rollback() - return err - } - } - if err := tx.Commit(); err != nil { - g.log.Println("ERROR: unable to save Gateway nodes:", err) - return err - } - - return nil -} - -// threadedSaveLoop periodically saves the gateway nodes. 
-func (g *Gateway) threadedSaveLoop() { - for { - select { - case <-g.threads.StopChan(): - return - case <-time.After(saveFrequency): - } - - func() { - err := g.threads.Add() - if err != nil { - return - } - defer g.threads.Done() - - g.mu.Lock() - err = g.save() - if err != nil { - g.log.Println("ERROR: unable to save gateway:", err) - } - g.mu.Unlock() - }() - } -} diff --git a/modules/gateway/rpc.go b/modules/gateway/rpc.go deleted file mode 100644 index 58d0e1c..0000000 --- a/modules/gateway/rpc.go +++ /dev/null @@ -1,274 +0,0 @@ -package gateway - -import ( - "encoding/binary" - "bytes" - "errors" - "io" - "sync" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// rpcID is an 8-byte signature that is added to all RPCs to tell the gatway -// what to do with the RPC. -type rpcID [8]byte - -// String returns a string representation of an rpcID. Empty elements of rpcID -// will be encoded as spaces. -func (id rpcID) String() string { - for i := range id { - if id[i] == 0 { - id[i] = ' ' - } - } - return string(id[:]) -} - -// handlerName truncates a string to 8 bytes. If len(name) < 8, the remaining -// bytes are 0. A handlerName is specified at the beginning of each network -// call, indicating which function should handle the connection. -func handlerName(name string) (id rpcID) { - copy(id[:], name) - return -} - -// managedRPC calls an RPC on the given address. managedRPC cannot be called on -// an address that the Gateway is not connected to. -func (g *Gateway) managedRPC(addr modules.NetAddress, name string, fn modules.RPCFunc) (err error) { - g.mu.RLock() - peer, ok := g.peers[addr] - g.mu.RUnlock() - if !ok { - return errors.New("can't call RPC on unconnected peer " + string(addr)) - } - - conn, err := peer.open() - if err != nil { - // Peer probably disconnected without sending a shutdown signal; - // disconnect from them. 
- peer.sess.Close() - g.mu.Lock() - delete(g.peers, addr) - g.mu.Unlock() - return err - } - defer func() { - err = modules.ComposeErrors(err, conn.Close()) - }() - - // Write header. - conn.SetDeadline(time.Now().Add(rpcStdDeadline)) - e := types.NewEncoder(conn) - id := handlerName(name) - e.WriteUint64(8) - e.Write(id[:]) - e.Flush() - conn.SetDeadline(time.Time{}) - - // Call fn. - err = fn(conn) - return err -} - -// RPC calls an RPC on the given address. RPC cannot be called on an address -// that the Gateway is not connected to. -func (g *Gateway) RPC(addr modules.NetAddress, name string, fn modules.RPCFunc) error { - if err := g.threads.Add(); err != nil { - return err - } - defer g.threads.Done() - return g.managedRPC(addr, name, fn) -} - -// RegisterRPC registers an RPCFunc as a handler for a given identifier. To -// call an RPC, use gateway.RPC, supplying the same identifier given to -// RegisterRPC. Identifiers should always use PascalCase. The first 8 -// characters of an identifier should be unique, as the identifier used -// internally is truncated to 8 bytes. -func (g *Gateway) RegisterRPC(name string, fn modules.RPCFunc) { - g.mu.Lock() - defer g.mu.Unlock() - if _, ok := g.handlers[handlerName(name)]; ok { - g.log.Println("CRITICAL: RPC already registered: " + name) - } - g.handlers[handlerName(name)] = fn -} - -// UnregisterRPC unregisters an RPC and removes the corresponding RPCFunc from -// g.handlers. Future calls to the RPC by peers will fail. -func (g *Gateway) UnregisterRPC(name string) { - g.mu.Lock() - defer g.mu.Unlock() - if _, ok := g.handlers[handlerName(name)]; !ok { - g.log.Println("CRITICAL: RPC not registered: " + name) - } - delete(g.handlers, handlerName(name)) -} - -// RegisterConnectCall registers a name and RPCFunc to be called on a peer -// upon connecting. 
-func (g *Gateway) RegisterConnectCall(name string, fn modules.RPCFunc) { - g.mu.Lock() - defer g.mu.Unlock() - if _, ok := g.initRPCs[name]; ok { - g.log.Println("CRITICAL: ConnectCall already registered: " + name) - } - g.initRPCs[name] = fn -} - -// UnregisterConnectCall unregisters an on-connect call and removes the -// corresponding RPCFunc from g.initRPCs. Future connections to peers will not -// trigger the RPC to be called on them. -func (g *Gateway) UnregisterConnectCall(name string) { - g.mu.Lock() - defer g.mu.Unlock() - if _, ok := g.initRPCs[name]; !ok { - g.log.Println("CRITICAL: ConnectCall not registered: " + name) - } - delete(g.initRPCs, name) -} - -// threadedListenPeer listens for new streams on a peer connection and serves them via -// threadedHandleConn. -func (g *Gateway) threadedListenPeer(p *peer) { - // threadedListenPeer registers to the peerTG instead of the primary thread - // group because peer connections can be lifetime in length, but can also - // be short-lived. The fact that they can be lifetime means that they can't - // call threads.Add as they will block calls to threads.Flush. The fact - // that they can be short-lived means that threads.OnStop is not a good - // tool for closing out the threads. Instead, they register to peerTG, - // which is cleanly closed upon gateway shutdown but will not block any - // calls to threads.Flush(). - if g.peerTG.Add() != nil { - return - } - defer g.peerTG.Done() - - // Spin up a goroutine to listen for a shutdown signal from both the peer - // and from the gateway. In the event of either, close the session. - connClosedChan := make(chan struct{}) - peerCloseChan := make(chan struct{}) - go func() { - // Signal that the session has been successfully closed, and that this - // goroutine has terminated. - defer close(connClosedChan) - - // Listen for a stop signal. - select { - case <-g.threads.StopChan(): - case <-peerCloseChan: - } - - // Close the session and remove p from the peer list. 
- p.sess.Close() - g.mu.Lock() - delete(g.peers, p.NetAddress) - g.mu.Unlock() - }() - - for { - conn, err := p.accept() - if err != nil { - break - } - // Set the default deadline on the conn. - err = conn.SetDeadline(time.Now().Add(rpcStdDeadline)) - if err != nil { - g.log.Printf("Peer connection (%v) deadline could not be set: %v\n", p.NetAddress, err) - continue - } - - // The handler is responsible for closing the connection, though a - // default deadline has been set. - go g.threadedHandleConn(conn) - if !g.managedSleep(peerRPCDelay) { - break - } - } - // Signal that the goroutine can shutdown. - close(peerCloseChan) - // Wait for confirmation that the goroutine has shut down before returning - // and releasing the threadgroup registration. - <-connClosedChan -} - -// threadedHandleConn reads header data from a connection, then routes it to the -// appropriate handler for further processing. -func (g *Gateway) threadedHandleConn(conn modules.PeerConn) { - defer func() { - _ = conn.Close() - }() - if g.threads.Add() != nil { - return - } - defer g.threads.Done() - - var id rpcID - err := conn.SetDeadline(time.Now().Add(rpcStdDeadline)) - if err != nil { - return - } - d := types.NewDecoder(io.LimitedReader{R: conn, N: 16}) - _ = d.ReadUint64() - d.Read(id[:]) - if err := d.Err(); err != nil { - return - } - // Call registered handler for this ID. - g.mu.RLock() - fn, ok := g.handlers[id] - g.mu.RUnlock() - if !ok { - return - } - - fn(conn) -} - -// Broadcast calls an RPC on all of the specified peers. The calls are run in -// parallel. Broadcasts are restricted to "one-way" RPCs, which simply write an -// object and disconnect. This is why Broadcast takes an interface{} instead of -// an RPCFunc. -func (g *Gateway) Broadcast(name string, obj types.EncoderTo, peers []modules.Peer) { - if g.threads.Add() != nil { - return - } - defer g.threads.Done() - - // Encode once. - var buf bytes.Buffer - e := types.NewEncoder(&buf) - e.WriteUint64(0) // Placeholder. 
- obj.EncodeTo(e) - e.Flush() - b := buf.Bytes() - binary.LittleEndian.PutUint64(b[:8], uint64(len(b) - 8)) - - fn := func(conn modules.PeerConn) error { - _, err := conn.Write(b) - return err - } - - var wg sync.WaitGroup - for _, p := range peers { - wg.Add(1) - go func(addr modules.NetAddress) { - defer wg.Done() - err := g.managedRPC(addr, name, fn) - if err != nil { - // Try one more time before giving up. - select { - case <-time.After(10 * time.Second): - case <-g.threads.StopChan(): - return - } - g.managedRPC(addr, name, fn) - } - }(p.NetAddress) - } - wg.Wait() -} diff --git a/modules/gateway/stream.go b/modules/gateway/stream.go deleted file mode 100644 index 703c1a6..0000000 --- a/modules/gateway/stream.go +++ /dev/null @@ -1,52 +0,0 @@ -package gateway - -import ( - "fmt" - "net" - - "github.com/mike76-dev/sia-satellite/internal/smux" -) - -// A streamSession is a multiplexed transport that can accept or initiate -// streams. -type streamSession interface { - Accept() (net.Conn, error) - Open() (net.Conn, error) - Close() error -} - -// newClientStream returns a new smux client. -func newClientStream(conn net.Conn, version string) streamSession { - return newSmuxClient(conn) -} - -// newServerStream returns a new smux server. -func newServerStream(conn net.Conn, version string) streamSession { - return newSmuxServer(conn) -} - -// smuxSession adapts the methods of smux.Session to conform to the -// streamSession interface. -type smuxSession struct { - sess *smux.Session -} - -func (s smuxSession) Accept() (net.Conn, error) { return s.sess.AcceptStream() } -func (s smuxSession) Open() (net.Conn, error) { return s.sess.OpenStream() } -func (s smuxSession) Close() error { return s.sess.Close() } - -func newSmuxServer(conn net.Conn) streamSession { - sess, err := smux.Server(conn, nil) // Default config means no error is possible. 
- if err != nil { - fmt.Println("CRITICAL: smux should not fail with default config:", err) - } - return smuxSession{sess} -} - -func newSmuxClient(conn net.Conn) streamSession { - sess, err := smux.Client(conn, nil) // Default config means no error is possible. - if err != nil { - fmt.Println("CRITICAL: smux should not fail with default config:", err) - } - return smuxSession{sess} -} diff --git a/modules/gateway/upnp.go b/modules/gateway/upnp.go deleted file mode 100644 index 1271a94..0000000 --- a/modules/gateway/upnp.go +++ /dev/null @@ -1,236 +0,0 @@ -package gateway - -import ( - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "strconv" - "strings" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "gitlab.com/NebulousLabs/go-upnp" -) - -// myExternalIP discovers the gateway's external IP by querying a centralized -// service, http://myexternalip.com. -func myExternalIP() (_ string, err error) { - // Timeout after 10 seconds. - client := http.Client{Timeout: time.Duration(10 * time.Second)} - resp, err := client.Get("http://myexternalip.com/raw") - if err != nil { - return "", err - } - defer func() { - err = modules.ComposeErrors(err, resp.Body.Close()) - }() - if resp.StatusCode != http.StatusOK { - errResp, _ := ioutil.ReadAll(resp.Body) - return "", errors.New(string(errResp)) - } - buf, err := ioutil.ReadAll(io.LimitReader(resp.Body, 64)) - if err != nil { - return "", err - } - if len(buf) == 0 { - return "", errors.New("myexternalip.com returned a 0 length IP address") - } - // Trim newline. - return strings.TrimSpace(string(buf)), nil -} - -// managedIPFromUPNP attempts learn the Gateway's external IP address via UPnP. 
-func (g *Gateway) managedIPFromUPNP(ctx context.Context) (string, error) { - d, err := upnp.Load(g.persist.RouterURL) - if err != nil { - d, err = upnp.DiscoverCtx(ctx) - if err != nil { - return "", err - } - loc := d.Location() - g.mu.Lock() - g.persist.RouterURL = loc - if err = g.save(); err != nil { - g.log.Println("WARN: could not save the gateway:", err) - } - g.mu.Unlock() - } - return d.ExternalIP() -} - -// managedLearnHostname tries to discover the external ip of the machine. If -// discovering the address failed or if it is invalid, an error is returned. -func (g *Gateway) managedLearnHostname(cancel <-chan struct{}) (net.IP, error) { - // Create ctx to cancel upnp discovery during shutdown. - ctx, ctxCancel := context.WithTimeout(g.threads.StopCtx(), timeoutIPDiscovery) - defer ctxCancel() - go func() { - select { - case <-cancel: - ctxCancel() - case <-g.threads.StopChan(): - ctxCancel() - case <-ctx.Done(): - } - }() - - // Try UPnP first (unless disabled), then peer-to-peer discovery, then - // myexternalip.com. - var host string - var err error - if g.staticUseUPNP { - host, err = g.managedIPFromUPNP(ctx) - } - if err != nil || host == "" { - host, err = g.managedIPFromPeers(ctx.Done()) - } - if err != nil || host == "" { - host, err = myExternalIP() - } - if err != nil { - return nil, modules.AddContext(err, "failed to discover external IP") - } - ip := net.ParseIP(host) - if ip == nil { - return nil, fmt.Errorf("%v is not a valid IP", host) - } - return ip, nil -} - -// threadedLearnHostname discovers the external IP of the Gateway regularly. -func (g *Gateway) threadedLearnHostname() { - if err := g.threads.Add(); err != nil { - return - } - defer g.threads.Done() - - for { - host, err := g.managedLearnHostname(nil) - if err != nil { - g.log.Println("WARN: failed to discover external IP:", err) - } - // If we were unable to discover our IP we try again later. 
- if err != nil { - if !g.managedSleep(rediscoverIPIntervalFailure) { - return // Shutdown interrupted sleep. - } - continue - } - - g.mu.RLock() - addr := modules.NetAddress(net.JoinHostPort(host.String(), g.port)) - g.mu.RUnlock() - if err := addr.IsValid(); err != nil { - g.log.Printf("WARN: discovered hostname %q is invalid: %v", addr, err) - if !g.managedSleep(rediscoverIPIntervalFailure) { - return // Shutdown interrupted sleep. - } - continue - } - - g.mu.Lock() - oldAddr := g.myAddr - g.myAddr = addr - g.mu.Unlock() - - if addr != oldAddr { - g.log.Println("INFO: our address is", addr) - } - - // Rediscover the IP later in case it changed. - if !g.managedSleep(rediscoverIPIntervalSuccess) { - return // Shutdown interrupted sleep. - } - } -} - -// managedForwardPort adds a port mapping to the router. -func (g *Gateway) managedForwardPort(port string) error { - if !g.staticUseUPNP { - // UPnP is disabled. - return nil - } - - // If the port is invalid, there is no need to perform any of the other - // tasks. - portInt, err := strconv.Atoi(port) - if err != nil { - return err - } - - // Create a context to stop UPnP discovery in case of a shutdown. - ctx, cancel := context.WithCancel(g.threads.StopCtx()) - defer cancel() - go func() { - select { - case <-g.threads.StopChan(): - cancel() - case <-ctx.Done(): - } - }() - - // Look for UPnP-enabled devices. - d, err := upnp.DiscoverCtx(ctx) - if err != nil { - err = fmt.Errorf("WARN: could not automatically forward port %s: no UPnP-enabled devices found: %s", port, err) - return err - } - - // Forward port. - err = d.ForwardTCP(uint16(portInt), "Satellite RPC") - if err != nil { - err = fmt.Errorf("WARN: could not automatically forward port %s: %s", port, err) - return err - } - - // Establish port-clearing at shutdown. - g.threads.AfterStop(func() { - g.managedClearPort(port) - }) - - return nil -} - -// managedClearPort removes a port mapping from the router. 
-func (g *Gateway) managedClearPort(port string) { - ctx, cancel := context.WithCancel(g.threads.StopCtx()) - defer cancel() - go func() { - select { - case <-g.threads.StopChan(): - cancel() - case <-ctx.Done(): - } - }() - d, err := upnp.DiscoverCtx(ctx) - if err != nil { - return - } - - portInt, _ := strconv.Atoi(port) - err = d.Clear(uint16(portInt)) - if err != nil { - g.log.Printf("WARN: could not automatically unforward port %s: %s\n", port, err) - return - } - - g.log.Println("INFO: successfully unforwarded port", port) -} - -// threadedForwardPort forwards a port and logs potential errors. -func (g *Gateway) threadedForwardPort(port string) { - if err := g.threads.Add(); err != nil { - return - } - defer g.threads.Done() - - if err := g.managedForwardPort(port); err != nil { - return - } - g.log.Println("INFO: successfully forwarded port", port) -} diff --git a/modules/helpers.go b/modules/helpers.go index bd0dfe1..9ca26d7 100644 --- a/modules/helpers.go +++ b/modules/helpers.go @@ -1,8 +1,10 @@ package modules import ( + "bytes" "encoding/binary" "fmt" + "io" "math" "math/big" @@ -37,11 +39,11 @@ func FromFloat(f float64) types.Currency { r := new(big.Rat).Mul(h, new(big.Rat).SetFloat64(f)) nBuf := make([]byte, 16) n := r.Num().Bytes() - copy(nBuf[16 - len(n):], n[:]) + copy(nBuf[16-len(n):], n[:]) num := types.NewCurrency(binary.BigEndian.Uint64(nBuf[8:]), binary.BigEndian.Uint64(nBuf[:8])) dBuf := make([]byte, 16) d := r.Denom().Bytes() - copy(dBuf[16 - len(d):], d[:]) + copy(dBuf[16-len(d):], d[:]) denom := types.NewCurrency(binary.BigEndian.Uint64(dBuf[8:]), binary.BigEndian.Uint64(dBuf[:8])) return num.Div(denom) } @@ -53,11 +55,11 @@ func MulFloat(c types.Currency, f float64) types.Currency { x = x.Mul(x, y) nBuf := make([]byte, 16) n := x.Num().Bytes() - copy(nBuf[16 - len(n):], n[:]) + copy(nBuf[16-len(n):], n[:]) num := types.NewCurrency(binary.BigEndian.Uint64(nBuf[8:]), binary.BigEndian.Uint64(nBuf[:8])) dBuf := make([]byte, 16) d := 
x.Denom().Bytes() - copy(dBuf[16 - len(d):], d[:]) + copy(dBuf[16-len(d):], d[:]) denom := types.NewCurrency(binary.BigEndian.Uint64(dBuf[8:]), binary.BigEndian.Uint64(dBuf[:8])) return num.Div(denom) } @@ -66,7 +68,7 @@ func MulFloat(c types.Currency, f float64) types.Currency { func Tax(height uint64, payout types.Currency) types.Currency { // First 21,000 blocks need to be treated differently. i := payout.Big() - if height + 1 < TaxHardforkHeight { + if height+1 < 21000 { r := new(big.Rat).SetInt(i) r.Mul(r, new(big.Rat).SetFloat64(0.039)) i.Div(r.Num(), r.Denom()) @@ -76,7 +78,7 @@ func Tax(height uint64, payout types.Currency) types.Currency { } // Round down to multiple of SiafundCount. - i.Sub(i, new(big.Int).Mod(i, big.NewInt(int64(SiafundCount)))) + i.Sub(i, new(big.Int).Mod(i, big.NewInt(10000))) // Convert to currency. lo := i.Uint64() @@ -145,61 +147,23 @@ func FilesizeUnits(size uint64) string { return fmt.Sprintf("%.*f %s", i, float64(size)/math.Pow10(3*i), sizes[i]) } -// StorageProofOutputID returns the ID of an output created by a file -// contract, given the status of the storage proof. The ID is calculating by -// hashing the concatenation of the StorageProofOutput Specifier, the ID of -// the file contract that the proof is for, a boolean indicating whether the -// proof was valid (true) or missed (false), and the index of the output -// within the file contract. -func StorageProofOutputID(fcid types.FileContractID, proofStatus bool, i int) types.SiacoinOutputID { - h := types.NewHasher() - types.SpecifierStorageProof.EncodeTo(h.E) - fcid.EncodeTo(h.E) - h.E.WriteBool(proofStatus) - h.E.WriteUint64(uint64(i)) - return types.SiacoinOutputID(h.Sum()) +// CopyTransaction creates a deep copy of the transaction. 
+func CopyTransaction(txn types.Transaction) types.Transaction { + var newTxn types.Transaction + var buf bytes.Buffer + e := types.NewEncoder(&buf) + txn.EncodeTo(e) + e.Flush() + d := types.NewDecoder(io.LimitedReader{R: &buf, N: int64(buf.Len())}) + newTxn.DecodeFrom(d) + return newTxn } -// CalculateCoinbase calculates the coinbase for a given height. The coinbase -// equation is: -// coinbase := max(InitialCoinbase - height, MinimumCoinbase) -func CalculateCoinbase(height uint64) types.Currency { - base := InitialCoinbase - height - if height > InitialCoinbase || base < MinimumCoinbase { - base = MinimumCoinbase - } - return types.NewCurrency64(base).Mul(types.HastingsPerSiacoin) -} - -// CalculateSubsidy takes a block and a height and determines the block -// subsidy. -func CalculateSubsidy(b types.Block, height uint64) types.Currency { - subsidy := CalculateCoinbase(height) - for _, txn := range b.Transactions { - for _, fee := range txn.MinerFees { - subsidy = subsidy.Add(fee) - } - } - return subsidy -} - -// CalculateNumSiacoins calculates the number of siacoins in circulation at a -// given height. -func CalculateNumSiacoins(height uint64) (total types.Currency) { - total = numGenesisSiacoins - deflationBlocks := InitialCoinbase - MinimumCoinbase - avgDeflationSiacoins := CalculateCoinbase(0).Add(CalculateCoinbase(height)).Div64(2) - if height <= deflationBlocks { - total = total.Add(avgDeflationSiacoins.Mul64(height + 1)) - } else { - total = total.Add(avgDeflationSiacoins.Mul64(deflationBlocks + 1)) - total = total.Add(CalculateCoinbase(height).Mul64(height - deflationBlocks)) - } - if height >= FoundationHardforkHeight { - total = total.Add(InitialFoundationSubsidy) - perSubsidy := FoundationSubsidyPerBlock.Mul64(FoundationSubsidyFrequency) - subsidies := (height - FoundationHardforkHeight) / FoundationSubsidyFrequency - total = total.Add(perSubsidy.Mul64(subsidies)) - } - return +// EncodedLen returns the encoded length of a transaction. 
+func EncodedLen(txn types.Transaction) int { + var buf bytes.Buffer + e := types.NewEncoder(&buf) + txn.EncodeTo(e) + e.Flush() + return buf.Len() } diff --git a/modules/manager.go b/modules/manager.go index 843c4d6..4fb190e 100644 --- a/modules/manager.go +++ b/modules/manager.go @@ -27,26 +27,26 @@ type HostAverages struct { func (ha *HostAverages) EncodeTo(e *types.Encoder) { e.WriteUint64(ha.NumHosts) e.WriteUint64(ha.Duration) - ha.StoragePrice.EncodeTo(e) - ha.Collateral.EncodeTo(e) - ha.DownloadBandwidthPrice.EncodeTo(e) - ha.UploadBandwidthPrice.EncodeTo(e) - ha.ContractPrice.EncodeTo(e) - ha.BaseRPCPrice.EncodeTo(e) - ha.SectorAccessPrice.EncodeTo(e) + types.V1Currency(ha.StoragePrice).EncodeTo(e) + types.V1Currency(ha.Collateral).EncodeTo(e) + types.V1Currency(ha.DownloadBandwidthPrice).EncodeTo(e) + types.V1Currency(ha.UploadBandwidthPrice).EncodeTo(e) + types.V1Currency(ha.ContractPrice).EncodeTo(e) + types.V1Currency(ha.BaseRPCPrice).EncodeTo(e) + types.V1Currency(ha.SectorAccessPrice).EncodeTo(e) } // DecodeFrom implements types.DecoderFrom. 
func (ha *HostAverages) DecodeFrom(d *types.Decoder) { ha.NumHosts = d.ReadUint64() ha.Duration = d.ReadUint64() - ha.StoragePrice.DecodeFrom(d) - ha.Collateral.DecodeFrom(d) - ha.DownloadBandwidthPrice.DecodeFrom(d) - ha.UploadBandwidthPrice.DecodeFrom(d) - ha.ContractPrice.DecodeFrom(d) - ha.BaseRPCPrice.DecodeFrom(d) - ha.SectorAccessPrice.DecodeFrom(d) + (*types.V1Currency)(&ha.StoragePrice).DecodeFrom(d) + (*types.V1Currency)(&ha.Collateral).DecodeFrom(d) + (*types.V1Currency)(&ha.DownloadBandwidthPrice).DecodeFrom(d) + (*types.V1Currency)(&ha.UploadBandwidthPrice).DecodeFrom(d) + (*types.V1Currency)(&ha.ContractPrice).DecodeFrom(d) + (*types.V1Currency)(&ha.BaseRPCPrice).DecodeFrom(d) + (*types.V1Currency)(&ha.SectorAccessPrice).DecodeFrom(d) } // UserBalance holds the current balance as well as @@ -195,8 +195,6 @@ func (fm *FilterMode) FromString(s string) error { // A HostDB is a database of hosts that the manager can use for figuring out // who to upload to, and download from. type HostDB interface { - Alerter - // ActiveHosts returns the list of hosts that are actively being selected // from. ActiveHosts() ([]HostDBEntry, error) @@ -276,8 +274,6 @@ type HostDB interface { // Manager implements the methods necessary to communicate with the // hosts. type Manager interface { - Alerter - // AcceptContracts accepts a set of contracts from the renter // and adds them to the contract set. AcceptContracts(types.PublicKey, []ContractMetadata) @@ -338,9 +334,8 @@ type Manager interface { // DownloadObject downloads an object and returns it. DownloadObject(io.Writer, types.PublicKey, []byte, []byte) error - // FeeEstimation returns the minimum and the maximum estimated fees for - // a transaction. - FeeEstimation() (types.Currency, types.Currency) + // FeeEstimation returns the recommended fee for a transaction. + FeeEstimation() types.Currency // Filter returns the HostDB's filterMode and filteredHosts. 
Filter() (FilterMode, map[string]types.PublicKey, []string, error) @@ -375,9 +370,6 @@ type Manager interface { // GetSpendings retrieves the user's spendings. GetSpendings(string, int, int) (UserSpendings, error) - // GetWalletSeed returns the wallet seed. - GetWalletSeed() (Seed, error) - // Host returns the host associated with the given public key. Host(types.PublicKey) (HostDBEntry, bool, error) @@ -661,7 +653,7 @@ func (a Allowance) Active() bool { // EncodeTo implements types.EncoderTo. func (a *Allowance) EncodeTo(e *types.Encoder) { - a.Funds.EncodeTo(e) + types.V1Currency(a.Funds).EncodeTo(e) e.WriteUint64(a.Hosts) e.WriteUint64(a.Period) e.WriteUint64(a.RenewWindow) @@ -670,20 +662,20 @@ func (a *Allowance) EncodeTo(e *types.Encoder) { e.WriteUint64(a.ExpectedDownload) e.WriteUint64(a.MinShards) e.WriteUint64(a.TotalShards) - a.MaxRPCPrice.EncodeTo(e) - a.MaxContractPrice.EncodeTo(e) - a.MaxDownloadBandwidthPrice.EncodeTo(e) - a.MaxSectorAccessPrice.EncodeTo(e) - a.MaxStoragePrice.EncodeTo(e) - a.MaxUploadBandwidthPrice.EncodeTo(e) - a.MinMaxCollateral.EncodeTo(e) + types.V1Currency(a.MaxRPCPrice).EncodeTo(e) + types.V1Currency(a.MaxContractPrice).EncodeTo(e) + types.V1Currency(a.MaxDownloadBandwidthPrice).EncodeTo(e) + types.V1Currency(a.MaxSectorAccessPrice).EncodeTo(e) + types.V1Currency(a.MaxStoragePrice).EncodeTo(e) + types.V1Currency(a.MaxUploadBandwidthPrice).EncodeTo(e) + types.V1Currency(a.MinMaxCollateral).EncodeTo(e) e.WriteUint64(a.BlockHeightLeeway) e.WriteBool(a.UploadPacking) } // DecodeFrom implements types.DecoderFrom. 
func (a *Allowance) DecodeFrom(d *types.Decoder) { - a.Funds.DecodeFrom(d) + (*types.V1Currency)(&a.Funds).DecodeFrom(d) a.Hosts = d.ReadUint64() a.Period = d.ReadUint64() a.RenewWindow = d.ReadUint64() @@ -692,13 +684,13 @@ func (a *Allowance) DecodeFrom(d *types.Decoder) { a.ExpectedDownload = d.ReadUint64() a.MinShards = d.ReadUint64() a.TotalShards = d.ReadUint64() - a.MaxRPCPrice.DecodeFrom(d) - a.MaxContractPrice.DecodeFrom(d) - a.MaxDownloadBandwidthPrice.DecodeFrom(d) - a.MaxSectorAccessPrice.DecodeFrom(d) - a.MaxStoragePrice.DecodeFrom(d) - a.MaxUploadBandwidthPrice.DecodeFrom(d) - a.MinMaxCollateral.DecodeFrom(d) + (*types.V1Currency)(&a.MaxRPCPrice).DecodeFrom(d) + (*types.V1Currency)(&a.MaxContractPrice).DecodeFrom(d) + (*types.V1Currency)(&a.MaxDownloadBandwidthPrice).DecodeFrom(d) + (*types.V1Currency)(&a.MaxSectorAccessPrice).DecodeFrom(d) + (*types.V1Currency)(&a.MaxStoragePrice).DecodeFrom(d) + (*types.V1Currency)(&a.MaxUploadBandwidthPrice).DecodeFrom(d) + (*types.V1Currency)(&a.MinMaxCollateral).DecodeFrom(d) a.BlockHeightLeeway = d.ReadUint64() a.UploadPacking = d.ReadBool() } diff --git a/modules/manager/alert.go b/modules/manager/alert.go deleted file mode 100644 index ea82fd8..0000000 --- a/modules/manager/alert.go +++ /dev/null @@ -1,15 +0,0 @@ -package manager - -import "github.com/mike76-dev/sia-satellite/modules" - -// Alerts implements the modules.Alerter interface for the manager. -func (m *Manager) Alerts() (crit, err, warn, info []modules.Alert) { - crit, err, warn, info = m.staticAlerter.Alerts() - contractorCrit, contractorErr, contractorWarn, contractorInfo := m.hostContractor.Alerts() - hostdbCrit, hostdbErr, hostdbWarn, hostdbInfo := m.hostDB.Alerts() - crit = append(append(crit, contractorCrit...), hostdbCrit...) - err = append(append(err, contractorErr...), hostdbErr...) - warn = append(append(warn, contractorWarn...), hostdbWarn...) - info = append(append(info, contractorInfo...), hostdbInfo...) 
- return -} diff --git a/modules/manager/contractor/alert.go b/modules/manager/contractor/alert.go deleted file mode 100644 index fb0dd04..0000000 --- a/modules/manager/contractor/alert.go +++ /dev/null @@ -1,9 +0,0 @@ -package contractor - -import "github.com/mike76-dev/sia-satellite/modules" - -// Alerts implements the modules.Alerter interface for the contractor. It returns -// all alerts of the contractor. -func (c *Contractor) Alerts() (crit, err, warn, info []modules.Alert) { - return c.staticAlerter.Alerts() -} diff --git a/modules/manager/contractor/allowance.go b/modules/manager/contractor/allowance.go index cafc5a8..a42b181 100644 --- a/modules/manager/contractor/allowance.go +++ b/modules/manager/contractor/allowance.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" ) @@ -72,7 +73,7 @@ func (c *Contractor) SetAllowance(rpk types.PublicKey, a modules.Allowance) erro return ErrAllowanceZeroExpectedDownload } else if a.MinShards == 0 || a.TotalShards == 0 { return ErrAllowanceWrongRedundancy - } else if !c.cs.Synced() { + } else if !c.s.Synced() { return errAllowanceNotSynced } @@ -87,7 +88,7 @@ func (c *Contractor) SetAllowance(rpk types.PublicKey, a modules.Allowance) erro if reflect.DeepEqual(a, renter.Allowance) { return nil } - c.log.Printf("INFO: setting allowance for %v\n", rpk) + c.log.Info("setting allowance for", zap.Stringer("renter", rpk)) // Set the current period if the existing allowance is empty. 
// @@ -110,7 +111,7 @@ func (c *Contractor) SetAllowance(rpk types.PublicKey, a modules.Allowance) erro c.mu.Lock() unlockContracts := false if reflect.DeepEqual(renter.Allowance, modules.Allowance{}) { - renter.CurrentPeriod = c.blockHeight + renter.CurrentPeriod = c.tip.Height if a.Period > a.RenewWindow { renter.CurrentPeriod -= a.RenewWindow } @@ -121,7 +122,7 @@ func (c *Contractor) SetAllowance(rpk types.PublicKey, a modules.Allowance) erro c.mu.Unlock() err := c.UpdateRenter(renter) if err != nil { - c.log.Println("ERROR: unable to update renter after setting allowance:", err) + c.log.Error("unable to update renter after setting allowance", zap.Error(err)) } // Cycle through all contracts and unlock them again since they might have @@ -165,7 +166,7 @@ func (c *Contractor) managedCancelAllowance(rpk types.PublicKey) error { return ErrRenterNotFound } - c.log.Printf("INFO: canceling allowance of %v\n", rpk) + c.log.Info("canceling allowance", zap.Stringer("renter", rpk)) // First need to mark all active contracts. ids := c.staticContracts.IDs(rpk) diff --git a/modules/manager/contractor/consts.go b/modules/manager/contractor/consts.go index cf8bab9..64f2491 100644 --- a/modules/manager/contractor/consts.go +++ b/modules/manager/contractor/consts.go @@ -25,25 +25,6 @@ const ( fundAccountTimeout = 10 * time.Second ) -// Constants related to the contractor's alerts. -const ( - // AlertCauseInsufficientAllowanceFunds indicates that the cause for the - // alert was insufficient allowance funds remaining. - AlertCauseInsufficientAllowanceFunds = "Insufficient allowance funds remaining" - - // AlertMSGAllowanceLowFunds indicates that forming/renewing a contract during - // contract maintenance isn't possible due to the allowance being low on - // funds. - AlertMSGAllowanceLowFunds = "At least one contract formation/renewal failed due to the allowance being low on funds" - - // AlertMSGFailedContractRenewal indicates that the contract renewal failed. 
- AlertMSGFailedContractRenewal = "Contractor is attempting to renew/refresh contracts but failed" - - // AlertMSGWalletLockedDuringMaintenance indicates that forming/renewing a - // contract during contract maintenance isn't possible due to a locked wallet. - AlertMSGWalletLockedDuringMaintenance = "At least one contract failed to form/renew due to the wallet being locked" -) - // Constants related to contract formation parameters. const ( // ContractFeeFundingMulFactor is the multiplying factor for contract fees diff --git a/modules/manager/contractor/contractmaintenance.go b/modules/manager/contractor/contractmaintenance.go index a6d578a..32d00ec 100644 --- a/modules/manager/contractor/contractmaintenance.go +++ b/modules/manager/contractor/contractmaintenance.go @@ -2,6 +2,7 @@ package contractor import ( "errors" + "fmt" "math/big" "slices" "time" @@ -9,7 +10,9 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/contractor/contractset" "github.com/mike76-dev/sia-satellite/modules/manager/proto" + "go.uber.org/zap" + rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" ) @@ -36,7 +39,7 @@ var ( // marks down the host score, and marks the contract as !GoodForRenew and // !GoodForUpload. func (c *Contractor) callNotifyDoubleSpend(fcID types.FileContractID, blockHeight uint64) { - c.log.Println("WARN: watchdog found a double-spend: ", fcID, blockHeight) + c.log.Warn("watchdog found a double-spend", zap.Stringer("fcid", fcID), zap.Uint64("height", blockHeight)) // Mark the contract as double-spent. This will cause the contract to be // excluded in period spending. 
@@ -46,7 +49,7 @@ func (c *Contractor) callNotifyDoubleSpend(fcID types.FileContractID, blockHeigh err := c.MarkContractBad(fcID) if err != nil { - c.log.Println("callNotifyDoubleSpend error in MarkContractBad", err) + c.log.Error("callNotifyDoubleSpend error in MarkContractBad", zap.Error(err)) } } @@ -71,7 +74,7 @@ func (c *Contractor) managedCheckForDuplicates() { } else { newContract, oldContract = contract, rc } - c.log.Printf("WARN: duplicate contract found. New contract is %x and old contract is %v\n", newContract.ID, oldContract.ID) + c.log.Warn(fmt.Sprintf("duplicate contract found. New contract is %x and old contract is %v", newContract.ID, oldContract.ID)) // Get FileContract. oldSC, ok := c.staticContracts.Acquire(oldContract.ID) @@ -100,7 +103,7 @@ func (c *Contractor) managedCheckForDuplicates() { c.staticContracts.Erase(oldSC.Metadata().ID) err := c.updateRenewedContract(oldContract.ID, newContract.ID) if err != nil { - c.log.Println("ERROR: failed to update renewal history.") + c.log.Error("failed to update renewal history", zap.Error(err)) } // Update the pubkeys map to contain the newest contract id. @@ -162,7 +165,7 @@ func (c *Contractor) managedEstimateRenewFundingRequirements(contract modules.Re // nothing to do otherwise. currentContract, exists := c.staticContracts.OldContract(currentID) if !exists { - c.log.Println("WARN: a known previous contract is not found in old contracts") + c.log.Warn("a known previous contract is not found in old contracts") break } @@ -224,8 +227,8 @@ func (c *Contractor) managedEstimateRenewFundingRequirements(contract modules.Re // Get an estimate for how much money we will be charged before going into // the transaction pool. 
- _, maxTxnFee := c.tpool.FeeEstimation() - txnFees := maxTxnFee.Mul64(2 * modules.EstimatedFileContractTransactionSetSize) + fee := c.cm.RecommendedFee() + txnFees := fee.Mul64(2 * 2048) // Add them all up and then return the estimate plus 33% for error margin // and just general volatility of usage pattern. @@ -279,7 +282,7 @@ func (c *Contractor) managedPruneRedundantAddressRange() { // hosts. badHosts, err := c.hdb.CheckForIPViolations(pks) if err != nil { - c.log.Println("WARN: error checking for IP violations:", err) + c.log.Warn("error checking for IP violations", zap.Error(err)) return } for _, host := range badHosts { @@ -287,7 +290,7 @@ func (c *Contractor) managedPruneRedundantAddressRange() { // to iterate through those, too. for _, fcid := range cids[host] { if err := c.managedCancelContract(fcid); err != nil { - c.log.Println("WARN: unable to cancel contract in managedPruneRedundantAddressRange", err) + c.log.Warn("unable to cancel contract in managedPruneRedundantAddressRange", zap.Error(err)) } } } @@ -314,7 +317,7 @@ func (c *Contractor) managedUpdateContractUtility(fileContract *contractset.File _, exists := c.renewedTo[fileContract.Metadata().ID] c.mu.Unlock() if exists && (utility.GoodForRenew || utility.GoodForUpload) { - c.log.Println("CRITICAL: attempting to update contract utility on a contract that has been renewed") + c.log.Error("attempting to update contract utility on a contract that has been renewed") } return fileContract.UpdateUtility(utility) @@ -335,20 +338,20 @@ func (c *Contractor) threadedContractMaintenance() { // Skip if a satellite maintenance is running. if c.m.Maintenance() { - c.log.Println("INFO: skipping contract maintenance because satellite maintenance is running") + c.log.Info("skipping contract maintenance because satellite maintenance is running") return } // No contract maintenance unless contractor is synced. 
if !c.managedSynced() { - c.log.Println("INFO: skipping contract maintenance since consensus isn't synced yet") + c.log.Info("skipping contract maintenance since consensus isn't synced yet") return } // No contract maintenance unless the wallet is synced. - height, err := c.wallet.Height() - if err != nil || height != c.blockHeight { - c.log.Println("INFO: skipping contract maintenance since wallet isn't synced yet") + height := c.wallet.Tip().Height + if height != c.tip.Height { + c.log.Info("skipping contract maintenance since wallet isn't synced yet") return } @@ -356,15 +359,15 @@ func (c *Contractor) threadedContractMaintenance() { // fine to return early if another thread is already doing maintenance. // The next block will trigger another round. if !c.maintenanceLock.TryLock() { - c.log.Println("ERROR: maintenance lock could not be obtained") + c.log.Error("maintenance lock could not be obtained") return } defer c.maintenanceLock.Unlock() - c.log.Println("INFO: performing contract maintenance") + c.log.Info("performing contract maintenance") // Get the current block height. c.mu.Lock() - blockHeight := c.blockHeight + blockHeight := c.tip.Height renters := c.renters c.mu.Unlock() @@ -383,7 +386,7 @@ func (c *Contractor) threadedContractMaintenance() { } err = c.hdb.UpdateContracts(c.staticContracts.ViewAll()) if err != nil { - c.log.Println("ERROR: unable to update hostdb contracts:", err) + c.log.Error("unable to update hostdb contracts", zap.Error(err)) return } @@ -396,11 +399,11 @@ func (c *Contractor) threadedContractMaintenance() { // list. 
var renewSet []fileContractRenewal var refreshSet []fileContractRenewal - _, maxFee := c.tpool.FeeEstimation() + fee := c.cm.RecommendedFee() for _, contract := range c.staticContracts.ViewAll() { renter, err := c.managedFindRenter(contract.ID) if err != nil { - c.log.Println("WARN: unable to find renter for contract", contract.ID) + c.log.Warn("unable to find renter for contract", zap.Stringer("id", contract.ID)) continue } @@ -413,7 +416,7 @@ func (c *Contractor) threadedContractMaintenance() { // settings. host, _, err := c.hdb.Host(contract.HostPublicKey) if err != nil { - c.log.Println("WARN: error getting host", err) + c.log.Warn("error getting host", zap.Error(err)) continue } if host.Filtered { @@ -433,26 +436,26 @@ func (c *Contractor) threadedContractMaintenance() { // Fetch the price table. pt, err := proto.FetchPriceTable(host) if err != nil { - c.log.Printf("WARN: unable to fetch price table from %s: %v\n", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("unable to fetch price table from %s", host.Settings.NetAddress), zap.Error(err)) continue } // Check if the host is gouging. - if err := modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, maxFee); err != nil { - c.log.Printf("WARN: gouging detected at host %s: %v\n", host.Settings.NetAddress, err) + if err := modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, fee); err != nil { + c.log.Warn(fmt.Sprintf("gouging detected at host %s", host.Settings.NetAddress), zap.Error(err)) continue } // Calculate the host's score. 
sb, err := c.hdb.EstimateHostScore(renter.Allowance, host) if err != nil { - c.log.Printf("ERROR: unable to calculate host score of %s: %v\n", host.Settings.NetAddress, err) + c.log.Error(fmt.Sprintf("unable to calculate host score of %s", host.Settings.NetAddress), zap.Error(err)) continue } renewAmount, err := c.managedEstimateRenewFundingRequirements(contract, blockHeight, renter.Allowance) if err != nil { - c.log.Println("WARN: contract skipped because there was an error estimating renew funding requirements", renewAmount, err) + c.log.Warn("contract skipped because there was an error estimating renew funding requirements", zap.Stringer("amount", renewAmount), zap.Error(err)) continue } renewSet = append(renewSet, fileContractRenewal{ @@ -475,10 +478,10 @@ func (c *Contractor) threadedContractMaintenance() { // if less than 'minContractFundRenewalThreshold' funds are remaining // (3% at time of writing), or if there is less than 3 sectors worth of // storage+upload+download remaining. - blockBytes := types.NewCurrency64(modules.SectorSize * uint64(renter.Allowance.Period)) + blockBytes := types.NewCurrency64(rhpv2.SectorSize * uint64(renter.Allowance.Period)) sectorStoragePrice := host.Settings.StoragePrice.Mul(blockBytes) - sectorUploadBandwidthPrice := host.Settings.UploadBandwidthPrice.Mul64(modules.SectorSize) - sectorDownloadBandwidthPrice := host.Settings.DownloadBandwidthPrice.Mul64(modules.SectorSize) + sectorUploadBandwidthPrice := host.Settings.UploadBandwidthPrice.Mul64(rhpv2.SectorSize) + sectorDownloadBandwidthPrice := host.Settings.DownloadBandwidthPrice.Mul64(rhpv2.SectorSize) sectorBandwidthPrice := sectorUploadBandwidthPrice.Add(sectorDownloadBandwidthPrice) sectorPrice := sectorStoragePrice.Add(sectorBandwidthPrice) percentRemaining, _ := big.NewRat(0, 1).SetFrac(contract.RenterFunds.Big(), contract.TotalCost.Big()).Float64() @@ -486,13 +489,13 @@ func (c *Contractor) threadedContractMaintenance() { // Fetch the price table. 
pt, err := proto.FetchPriceTable(host) if err != nil { - c.log.Printf("WARN: unable to fetch price table from %s: %v\n", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("unable to fetch price table from %s", host.Settings.NetAddress), zap.Error(err)) continue } // Check if the host is gouging. - if err := modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, maxFee); err != nil { - c.log.Printf("WARN: gouging detected at host %s: %v\n", host.Settings.NetAddress, err) + if err := modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, fee); err != nil { + c.log.Warn(fmt.Sprintf("gouging detected at host %s", host.Settings.NetAddress), zap.Error(err)) continue } @@ -523,7 +526,7 @@ func (c *Contractor) threadedContractMaintenance() { } if len(renewSet) != 0 || len(refreshSet) != 0 { - c.log.Printf("INFO: renewing %v contracts and refreshing %v contracts", len(renewSet), len(refreshSet)) + c.log.Info(fmt.Sprintf("renewing %v contracts and refreshing %v contracts", len(renewSet), len(refreshSet))) } // Update the failed renew map so that it only contains contracts which we @@ -551,16 +554,10 @@ func (c *Contractor) threadedContractMaintenance() { // Return here if an interrupt or kill signal has been sent. select { case <-c.tg.StopChan(): - c.log.Println("INFO: returning because the contractor was stopped") + c.log.Info("returning because the contractor was stopped") default: } - unlocked, err := c.wallet.Unlocked() - if !unlocked || err != nil { - c.log.Println("WARN: contractor is attempting to renew contracts that are about to expire, however the wallet is locked") - return - } - // Get the renter. The error is ignored since we know already that // the renter exists. renter, _ := c.managedFindRenter(renewal.contract.ID) @@ -568,16 +565,16 @@ func (c *Contractor) threadedContractMaintenance() { // Check if the renter has a sufficient balance. 
ub, err := c.m.GetBalance(renter.Email) if err != nil { - c.log.Println("ERROR: couldn't get renter balance:", err) + c.log.Error("couldn't get renter balance", zap.Error(err)) continue } cost := modules.Float64(renewal.amount) if !ub.Subscribed && ub.Balance < cost/hastings { - c.log.Println("INFO: renewal skipped, because renter balance is insufficient") + c.log.Info("renewal skipped, because renter balance is insufficient", zap.String("renter", renter.Email)) continue } if ub.OnHold > 0 && ub.OnHold < uint64(time.Now().Unix()-int64(modules.OnHoldThreshold.Seconds())) { - c.log.Println("INFO: renewal skipped, because renter account is on hold") + c.log.Info("renewal skipped, because renter account is on hold", zap.String("renter", renter.Email)) continue } @@ -592,7 +589,7 @@ func (c *Contractor) threadedContractMaintenance() { } } if goodContracts > int(renter.Allowance.Hosts)+hostBufferForRenewals { - c.log.Printf("INFO: renewal skipped, because renter has already enough contracts: %v > %v", goodContracts, renter.Allowance.Hosts) + c.log.Info(fmt.Sprintf("renewal skipped, because renter has already enough contracts: %v > %v", goodContracts, renter.Allowance.Hosts)) continue } @@ -602,9 +599,9 @@ func (c *Contractor) threadedContractMaintenance() { fundsSpent, newContract, err := c.managedRenewContract(renewal.contract, renewal.renterPubKey, renewal.secretKey, renewal.amount, renter.ContractEndHeight()) if modules.ContainsError(err, errContractNotGFR) { // Do not add a renewal error. 
- c.log.Println("INFO: contract skipped because it is not good for renew", renewal.contract.ID) + c.log.Info("contract skipped because it is not good for renew", zap.Stringer("id", renewal.contract.ID)) } else if err != nil { - c.log.Println("ERROR: error renewing a contract", renewal.contract.ID, err) + c.log.Error("error renewing a contract", zap.Stringer("id", renewal.contract.ID), zap.Error(err)) renewErr = modules.ComposeErrors(renewErr, err) numRenewFails++ } @@ -615,12 +612,12 @@ func (c *Contractor) threadedContractMaintenance() { amount := funds / hastings err = c.m.LockSiacoins(renter.Email, amount) if err != nil { - c.log.Println("ERROR: couldn't lock funds:", err) + c.log.Error("couldn't lock funds", zap.Error(err)) } // Increment the number of renewals in the database. err = c.m.IncrementStats(renter.Email, true) if err != nil { - c.log.Println("ERROR: couldn't update stats:", err) + c.log.Error("couldn't update stats", zap.Error(err)) } // Add this contract to the contractor and save. @@ -629,14 +626,14 @@ func (c *Contractor) threadedContractMaintenance() { GoodForRenew: true, }) if err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) continue } c.mu.Lock() err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("ERROR: unable to save the contractor:", err) + c.log.Error("unable to save the contractor", zap.Error(err)) } } } @@ -645,16 +642,10 @@ func (c *Contractor) threadedContractMaintenance() { // Return here if an interrupt or kill signal has been sent. 
select { case <-c.tg.StopChan(): - c.log.Println("INFO: returning because the contractor was stopped") + c.log.Info("returning because the contractor was stopped") default: } - unlocked, err := c.wallet.Unlocked() - if !unlocked || err != nil { - c.log.Println("WARN: contractor is attempting to renew contracts that are about to expire, however the wallet is locked") - return - } - // Get the renter. The error is ignored since we know already that // the renter exists. renter, _ := c.managedFindRenter(renewal.contract.ID) @@ -662,16 +653,16 @@ func (c *Contractor) threadedContractMaintenance() { // Check if the renter has a sufficient balance. ub, err := c.m.GetBalance(renter.Email) if err != nil { - c.log.Println("ERROR: couldn't get renter balance:", err) + c.log.Error("couldn't get renter balance", zap.Error(err)) continue } cost := modules.Float64(renewal.amount) if !ub.Subscribed && ub.Balance < cost/hastings { - c.log.Println("INFO: renewal skipped, because renter balance is insufficient") + c.log.Info("renewal skipped, because renter balance is insufficient", zap.String("renter", renter.Email)) continue } if ub.OnHold > 0 && ub.OnHold < uint64(time.Now().Unix()-int64(modules.OnHoldThreshold.Seconds())) { - c.log.Println("INFO: renewal skipped, because renter account is on hold") + c.log.Info("renewal skipped, because renter account is on hold", zap.String("renter", renter.Email)) continue } @@ -680,7 +671,7 @@ func (c *Contractor) threadedContractMaintenance() { // 'fundsSpent' will return '0'. 
fundsSpent, newContract, err := c.managedRenewContract(renewal.contract, renewal.renterPubKey, renewal.secretKey, renewal.amount, renter.ContractEndHeight()) if err != nil { - c.log.Println("ERROR: error refreshing a contract", renewal.contract.ID, err) + c.log.Error("error refreshing a contract", zap.Stringer("id", renewal.contract.ID), zap.Error(err)) renewErr = modules.ComposeErrors(renewErr, err) numRenewFails++ } @@ -691,12 +682,12 @@ func (c *Contractor) threadedContractMaintenance() { amount := funds / hastings err = c.m.LockSiacoins(renter.Email, amount) if err != nil { - c.log.Println("ERROR: couldn't lock funds:", err) + c.log.Error("couldn't lock funds", zap.Error(err)) } // Increment the number of renewals in the database. err = c.m.IncrementStats(renter.Email, true) if err != nil { - c.log.Println("ERROR: couldn't update stats:", err) + c.log.Error("couldn't update stats", zap.Error(err)) } // Add this contract to the contractor and save. @@ -705,14 +696,14 @@ func (c *Contractor) threadedContractMaintenance() { GoodForRenew: true, }) if err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) continue } c.mu.Lock() err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("ERROR: unable to save the contractor:", err) + c.log.Error("unable to save the contractor", zap.Error(err)) } } } @@ -735,7 +726,7 @@ func (c *Contractor) threadedContractMaintenance() { } neededContracts := int(renter.Allowance.Hosts) - uploadContracts if neededContracts > 0 { - c.log.Printf("INFO: %v need more contracts: %v\n", renter.PublicKey, neededContracts) + c.log.Info(fmt.Sprintf("%v need more contracts: %v", renter.PublicKey, neededContracts)) } // Assemble two exclusion lists. The first one includes all hosts that the @@ -760,12 +751,12 @@ func (c *Contractor) threadedContractMaintenance() { // Get Hosts. 
hosts, err := c.hdb.RandomHostsWithAllowance(neededContracts*4+randomHostsBufferForScore, blacklist, addressBlacklist, renter.Allowance) if err != nil { - c.log.Println("WARN: not forming new contracts:", err) + c.log.Warn("not forming new contracts", zap.Error(err)) continue } // Calculate the anticipated transaction fee. - txnFee := maxFee.Mul64(modules.EstimatedFileContractTransactionSetSize) + txnFee := fee.Mul64(2048) // Form contracts with the hosts one at a time, until we have enough // contracts. @@ -773,7 +764,7 @@ func (c *Contractor) threadedContractMaintenance() { // Return here if an interrupt or kill signal has been sent. select { case <-c.tg.StopChan(): - c.log.Println("INFO: returning because the manager was stopped") + c.log.Info("returning because the manager was stopped") return default: } @@ -786,14 +777,14 @@ func (c *Contractor) threadedContractMaintenance() { // Fetch the price table. pt, err := proto.FetchPriceTable(host) if err != nil { - c.log.Printf("WARN: unable to fetch price table from %s: %v", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("unable to fetch price table from %s", host.Settings.NetAddress), zap.Error(err)) continue } // Check if the host is gouging. - err = modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, maxFee) + err = modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, fee) if err != nil { - c.log.Printf("WARN: gouging detected at %s: %v\n", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("gouging detected at %s", host.Settings.NetAddress), zap.Error(err)) continue } @@ -815,47 +806,40 @@ func (c *Contractor) threadedContractMaintenance() { // Check if the renter has a sufficient balance. 
ub, err := c.m.GetBalance(renter.Email) if err != nil { - c.log.Println("ERROR: couldn't get renter balance:", err) + c.log.Error("couldn't get renter balance", zap.Error(err)) continue } cost := modules.Float64(contractFunds) if !ub.Subscribed && ub.Balance < cost/hastings { - c.log.Println("INFO: contract formation skipped, because renter balance is insufficient") + c.log.Info("contract formation skipped, because renter balance is insufficient", zap.String("renter", renter.Email)) continue } if ub.OnHold > 0 && ub.OnHold < uint64(time.Now().Unix()-int64(modules.OnHoldThreshold.Seconds())) { - c.log.Println("INFO: contract formation skipped, because renter account is on hold") + c.log.Info("contract formation skipped, because renter account is on hold", zap.String("renter", renter.Email)) continue } - // Confirm the wallet is still unlocked. - unlocked, err := c.wallet.Unlocked() - if !unlocked || err != nil { - c.log.Println("WARN: contractor is attempting to establish new contracts with hosts, however the wallet is locked") - return - } - // Attempt forming a contract with this host. fundsSpent, newContract, err := c.managedNewContract(renter.PublicKey, renter.PrivateKey, host, contractFunds, renter.ContractEndHeight()) if err != nil { - c.log.Printf("WARN: attempted to form a contract with %v, but negotiation failed: %v\n", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("attempted to form a contract with %v, but negotiation failed", host.Settings.NetAddress), zap.Error(err)) continue } neededContracts-- - c.log.Println("INFO: a new contract has been formed with a host:", newContract.ID) + c.log.Info("a new contract has been formed with a host", zap.Stringer("id", newContract.ID)) // Lock the funds in the database. 
funds := modules.Float64(fundsSpent) amount := funds / hastings err = c.m.LockSiacoins(renter.Email, amount) if err != nil { - c.log.Println("ERROR: couldn't lock funds:", err) + c.log.Error("couldn't lock funds", zap.Error(err)) } // Increment the number of formations in the database. err = c.m.IncrementStats(renter.Email, false) if err != nil { - c.log.Println("ERROR: couldn't update stats:", err) + c.log.Error("couldn't update stats", zap.Error(err)) } // Add this contract to the contractor and save. @@ -864,14 +848,14 @@ func (c *Contractor) threadedContractMaintenance() { GoodForRenew: true, }) if err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) continue } c.mu.Lock() err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("Unable to save the contractor:", err) + c.log.Error("unable to save the contractor", zap.Error(err)) } } } diff --git a/modules/manager/contractor/contractor.go b/modules/manager/contractor/contractor.go index 5289c77..ac20495 100644 --- a/modules/manager/contractor/contractor.go +++ b/modules/manager/contractor/contractor.go @@ -14,18 +14,13 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/contractor/contractset" "github.com/mike76-dev/sia-satellite/persist" + "go.uber.org/zap" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) var ( - errNilDB = errors.New("cannot create contractor with nil database") - errNilCS = errors.New("cannot create contractor with nil consensus set") - errNilHDB = errors.New("cannot create contractor with nil HostDB") - errNilManager = errors.New("cannot create contractor with nil manager") - errNilTpool = errors.New("cannot create contractor with nil transaction pool") - errNilWallet = errors.New("cannot create contractor with nil wallet") - errHostNotFound = errors.New("host not found") errContractNotFound = errors.New("contract not 
found") ) @@ -34,16 +29,15 @@ var ( // contracts. type Contractor struct { // Dependencies. - cs modules.ConsensusSet - m modules.Manager - db *sql.DB - hdb modules.HostDB - log *persist.Logger - mu sync.RWMutex - staticAlerter *modules.GenericAlerter - tg siasync.ThreadGroup - tpool modules.TransactionPool - wallet modules.Wallet + cm *chain.Manager + s modules.Syncer + m modules.Manager + db *sql.DB + hdb modules.HostDB + log *zap.Logger + mu sync.RWMutex + tg siasync.ThreadGroup + wallet modules.Wallet // Only one thread should be performing contract maintenance at a time. interruptMaintenance chan struct{} @@ -51,9 +45,8 @@ type Contractor struct { uploadingBufferedFiles bool runningUploads map[string]func() - blockHeight uint64 - synced chan struct{} - lastChange modules.ConsensusChangeID + tip types.ChainIndex + synced chan struct{} renters map[types.PublicKey]modules.Renter @@ -132,7 +125,7 @@ func (c *Contractor) PeriodSpending(rpk types.PublicKey) (modules.RenterSpending // Filter out by renter. r, err := c.managedFindRenter(contract.ID) if err != nil { - c.log.Println("ERROR: contract has no known renter associated with it:", contract.ID) + c.log.Error("contract has no known renter associated with it", zap.Stringer("id", contract.ID)) continue } if r.PublicKey != rpk { @@ -158,12 +151,12 @@ func (c *Contractor) PeriodSpending(rpk types.PublicKey) (modules.RenterSpending spending.MaintenanceSpending = spending.MaintenanceSpending.Add(contract.MaintenanceSpending) spending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending) spending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending) - } else if err != nil && exist && contract.EndHeight+host.Settings.WindowSize+modules.MaturityDelay > c.blockHeight { + } else if err != nil && exist && contract.EndHeight+host.Settings.WindowSize+144 > c.tip.Height { // Calculate funds that are being withheld in contracts. 
spending.WithheldFunds = spending.WithheldFunds.Add(contract.RenterFunds) // Record the largest window size for worst case when reporting the spending. - if contract.EndHeight+host.Settings.WindowSize+modules.MaturityDelay >= spending.ReleaseBlock { - spending.ReleaseBlock = contract.EndHeight + host.Settings.WindowSize + modules.MaturityDelay + if contract.EndHeight+host.Settings.WindowSize+144 >= spending.ReleaseBlock { + spending.ReleaseBlock = contract.EndHeight + host.Settings.WindowSize + 144 } // Calculate Previous spending. spending.PreviousSpending = spending.PreviousSpending.Add(contract.ContractFee).Add(contract.TxnFee).Add(contract.SiafundFee).Add(contract.DownloadSpending).Add(contract.UploadSpending).Add(contract.StorageSpending).Add(contract.FundAccountSpending).Add(contract.MaintenanceSpending.Sum()) @@ -222,7 +215,7 @@ func (c *Contractor) RefreshedContract(fcid types.FileContractID) bool { // Grab the contract to check its end height. contract, ok := c.staticContracts.OldContract(fcid) if !ok { - c.log.Println("ERROR: contract not found in oldContracts, despite there being a renewal to the contract") + c.log.Error("contract not found in oldContracts, despite there being a renewal to the contract", zap.Stringer("fcid", fcid)) return false } @@ -231,7 +224,7 @@ func (c *Contractor) RefreshedContract(fcid types.FileContractID) bool { if !ok { newContract, ok = c.staticContracts.OldContract(newFCID) if !ok { - c.log.Println("ERROR: contract was not found in the database, despite their being another contract that claims to have renewed to it.") + c.log.Error("contract was not found in the database, despite their being another contract that claims to have renewed to it", zap.Stringer("fcid", newFCID)) return false } } @@ -256,66 +249,47 @@ func (c *Contractor) Close() error { } // New returns a new Contractor. 
-func New(db *sql.DB, cs modules.ConsensusSet, m modules.Manager, tpool modules.TransactionPool, wallet modules.Wallet, hdb modules.HostDB, dir string) (*Contractor, <-chan error) { +func New(db *sql.DB, cm *chain.Manager, s modules.Syncer, m modules.Manager, wallet modules.Wallet, hdb modules.HostDB, dir string) (*Contractor, <-chan error) { errChan := make(chan error, 1) defer close(errChan) - // Check for nil inputs. - if db == nil { - errChan <- errNilDB - return nil, errChan - } - if cs == nil { - errChan <- errNilCS - return nil, errChan - } - if m == nil { - errChan <- errNilManager - return nil, errChan - } - if wallet == nil { - errChan <- errNilWallet - return nil, errChan - } - if tpool == nil { - errChan <- errNilTpool - return nil, errChan - } - if hdb == nil { - errChan <- errNilHDB - return nil, errChan - } // Create the logger. - logger, err := persist.NewFileLogger(filepath.Join(dir, "contractor.log")) + logger, closeFn, err := persist.NewFileLogger(filepath.Join(dir, "contractor.log")) if err != nil { errChan <- err return nil, errChan } + // Create the contract set. - contractSet, err := contractset.NewContractSet(db, logger, cs.Height()) + contractSet, err := contractset.NewContractSet(db, logger, cm.Tip().Height) if err != nil { errChan <- err return nil, errChan } // Handle blocking startup. - c, err := contractorBlockingStartup(db, cs, m, tpool, wallet, hdb, contractSet, logger) + c, err := contractorBlockingStartup(db, cm, s, m, wallet, hdb, contractSet, logger) if err != nil { errChan <- err return nil, errChan } + // Close the logger upon shutdown. + c.tg.AfterStop(func() { + closeFn() + }) + // Non-blocking startup. go func() { // Subscribe to the consensus set in a separate goroutine. 
if err := c.tg.Add(); err != nil { - c.log.Println("ERROR: couldn't start a thread:", err) + c.log.Error("couldn't start a thread", zap.Error(err)) return } defer c.tg.Done() - err := contractorAsyncStartup(c, cs) + err := contractorAsyncStartup(c) if err != nil { - c.log.Println("ERROR: couldn't start contractor:", err) + c.log.Error("couldn't start contractor", zap.Error(err)) } }() @@ -323,17 +297,16 @@ func New(db *sql.DB, cs modules.ConsensusSet, m modules.Manager, tpool modules.T } // contractorBlockingStartup handles the blocking portion of New. -func contractorBlockingStartup(db *sql.DB, cs modules.ConsensusSet, m modules.Manager, tp modules.TransactionPool, w modules.Wallet, hdb modules.HostDB, contractSet *contractset.ContractSet, l *persist.Logger) (*Contractor, error) { +func contractorBlockingStartup(db *sql.DB, cm *chain.Manager, s modules.Syncer, m modules.Manager, w modules.Wallet, hdb modules.HostDB, contractSet *contractset.ContractSet, l *zap.Logger) (*Contractor, error) { // Create the Contractor object. c := &Contractor{ - staticAlerter: modules.NewAlerter("contractor"), - db: db, - cs: cs, - hdb: hdb, - log: l, - m: m, - tpool: tp, - wallet: w, + db: db, + cm: cm, + s: s, + hdb: hdb, + log: l, + m: m, + wallet: w, interruptMaintenance: make(chan struct{}), synced: make(chan struct{}), @@ -353,13 +326,6 @@ func contractorBlockingStartup(db *sql.DB, cs modules.ConsensusSet, m modules.Ma c.um = newUploadManager(c, 5, 3*time.Second) c.migrator = newMigrator(c, 1) - // Close the logger upon shutdown. - c.tg.AfterStop(func() { - if err := c.log.Close(); err != nil { - fmt.Println("ERROR: failed to close the contractor logger") - } - }) - // Load the prior persistence structures. err := c.load() if err != nil { @@ -378,11 +344,6 @@ func contractorBlockingStartup(db *sql.DB, cs modules.ConsensusSet, m modules.Ma // Update the pubkeysToContractID map. c.managedUpdatePubKeysToContractIDMap() - // Unsubscribe from the consensus set upon shutdown. 
- c.tg.OnStop(func() { - cs.Unsubscribe(c) - }) - // We may have resubscribed. Save now so that we don't lose our work. c.mu.Lock() err = c.save() @@ -394,22 +355,37 @@ func contractorBlockingStartup(db *sql.DB, cs modules.ConsensusSet, m modules.Ma return c, nil } -// contractorAsyncStartup handles the async portion of New. -func contractorAsyncStartup(c *Contractor, cs modules.ConsensusSet) error { - err := cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan()) - if modules.ContainsError(err, modules.ErrInvalidConsensusChangeID) { - // Reset the contractor consensus variables and try rescanning. - c.blockHeight = 0 - c.lastChange = modules.ConsensusChangeBeginning - err = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan()) - } - if modules.ContainsError(err, siasync.ErrStopped) { - return nil +func (c *Contractor) sync(index types.ChainIndex) error { + for index != c.cm.Tip() { + select { + case <-c.tg.StopChan(): + return nil + default: + } + crus, caus, err := c.cm.UpdatesSince(index, 1000) + if err != nil { + c.log.Error("failed to subscribe to chain manager", zap.Error(err)) + return err + } else if err := c.UpdateChainState(crus, caus); err != nil { + c.log.Error("failed to update chain state", zap.Error(err)) + return err + } + if len(caus) > 0 { + index = caus[len(caus)-1].State.Index + } } + return nil +} + +// contractorAsyncStartup handles the async portion of New. +func contractorAsyncStartup(c *Contractor) error { + err := c.sync(c.tip) if err != nil { - return err + // Reset the contractor consensus variables and try rescanning. + c.tip = types.ChainIndex{} + err = c.sync(c.tip) } - return nil + return err } // managedSynced returns true if the contractor is synced with the consensusset. 
@@ -462,7 +438,7 @@ func (c *Contractor) UnlockBalance(fcid types.FileContractID) { if !exists { contract, exists = c.staticContracts.OldContract(fcid) if !exists { - c.log.Println("ERROR: trying to unlock funds of a non-existing contract:", fcid) + c.log.Error("trying to unlock funds of a non-existing contract", zap.Stringer("fcid", fcid)) return } } @@ -471,7 +447,7 @@ func (c *Contractor) UnlockBalance(fcid types.FileContractID) { renter, err := c.managedFindRenter(fcid) c.mu.Unlock() if err != nil { - c.log.Println("ERROR: trying to unlock funds of a non-existing renter:", fcid) + c.log.Error("trying to unlock funds of a non-existing renter", zap.Stringer("fcid", fcid)) return } @@ -484,7 +460,7 @@ func (c *Contractor) UnlockBalance(fcid types.FileContractID) { err = c.m.UnlockSiacoins(renter.Email, amount, total, contract.StartHeight) if err != nil { - c.log.Println("ERROR: unable to unlock funds:", err) + c.log.Error("unable to unlock funds", zap.Error(err)) return } @@ -495,7 +471,7 @@ func (c *Contractor) UnlockBalance(fcid types.FileContractID) { func (c *Contractor) UpdateContract(rev types.FileContractRevision, sigs []types.TransactionSignature, uploads, downloads, fundAccount types.Currency) error { err := c.staticContracts.UpdateContract(rev, sigs, uploads, downloads, fundAccount) if err != nil { - c.log.Println("ERROR: revision update failed:", rev.ParentID) + c.log.Error("revision update failed", zap.Stringer("id", rev.ParentID), zap.Error(err)) } return err @@ -549,7 +525,7 @@ func (c *Contractor) UpdateRenterSettings(rpk types.PublicKey, settings modules. 
func (c *Contractor) UpdateMetadata(pk types.PublicKey, fm modules.FileMetadata) error { err := c.updateMetadata(pk, fm, true) if err != nil { - c.log.Println("ERROR: couldn't update metadata:", err) + c.log.Error("couldn't update metadata", zap.Error(err)) } return err } @@ -558,7 +534,7 @@ func (c *Contractor) UpdateMetadata(pk types.PublicKey, fm modules.FileMetadata) func (c *Contractor) RetrieveMetadata(pk types.PublicKey, present []modules.BucketFiles) (fm []modules.FileMetadata, err error) { fm, err = c.retrieveMetadata(pk, present) if err != nil { - c.log.Println("ERROR: couldn't retrieve metadata:", err) + c.log.Error("couldn't retrieve metadata", zap.Error(err)) } return } @@ -567,7 +543,7 @@ func (c *Contractor) RetrieveMetadata(pk types.PublicKey, present []modules.Buck func (c *Contractor) UpdateSlab(rpk types.PublicKey, slab modules.Slab, packed bool) error { err := c.updateSlab(rpk, slab, packed) if err != nil { - c.log.Println("ERROR: couldn't update slab:", err) + c.log.Error("couldn't update slab", zap.Error(err)) } return err } @@ -589,9 +565,14 @@ func (c *Contractor) AcceptContracts(rpk types.PublicKey, contracts []modules.Co } // Find the contract txn. - block, ok := c.cs.BlockAtHeight(contract.StartHeight + 1) + index, ok := c.cm.BestIndex(contract.StartHeight + 1) + if !ok { + c.log.Error("couldn't find chain index", zap.Uint64("height", contract.StartHeight+1)) + continue + } + block, ok := c.cm.Block(index.ID) if !ok { - c.log.Println("ERROR: couldn't find block at height", contract.StartHeight+1) + c.log.Error("couldn't find block at height", zap.Uint64("height", contract.StartHeight+1)) continue } @@ -607,16 +588,16 @@ func (c *Contractor) AcceptContracts(rpk types.PublicKey, contracts []modules.Co } } if !found { - c.log.Println("ERROR: couldn't find transaction for", contract.ID) + c.log.Error("couldn't find transaction for", zap.Stringer("id", contract.ID)) continue } // Sanity check: the end height should not be in the past. 
- // We use consensusset.Height instead of c.blockHeight here, because + // We use cm.Tip().Height instead of c.tip.Height here, because // the contractor may not be synced yet. - height := c.cs.Height() + height := c.cm.Tip().Height if endHeight <= height { - c.log.Printf("WARN: a contract was submitted with the end height in the past: %v <= %v\n", endHeight, height) + c.log.Warn(fmt.Sprintf("a contract was submitted with the end height in the past: %v <= %v", endHeight, height)) continue } @@ -630,7 +611,7 @@ func (c *Contractor) AcceptContracts(rpk types.PublicKey, contracts []modules.Co } rc, err := c.staticContracts.InsertContract(txn, contract.StartHeight, contract.TotalCost, contract.ContractPrice, txnFee, tax, rpk, true) if err != nil { - c.log.Printf("ERROR: couldn't accept contract %s: %v\n", contract.ID, err) + c.log.Error(fmt.Sprintf("couldn't accept contract %s", contract.ID), zap.Error(err)) continue } @@ -645,7 +626,7 @@ func (c *Contractor) AcceptContracts(rpk types.PublicKey, contracts []modules.Co c.renewedTo[contract.RenewedFrom] = contract.ID err = c.updateRenewedContract(contract.RenewedFrom, contract.ID) if err != nil { - c.log.Println("ERROR: couldn't update renewal history:", err) + c.log.Error("couldn't update renewal history", zap.Error(err)) } } } @@ -655,14 +636,14 @@ func (c *Contractor) AcceptContracts(rpk types.PublicKey, contracts []modules.Co for _, contract := range contracts { err := c.UpdateContract(contract.Revision, nil, contract.UploadSpending, contract.DownloadSpending, contract.FundAccountSpending) if err != nil { - c.log.Println("ERROR: couldn't update contract spendings", err) + c.log.Error("couldn't update contract spendings", zap.Error(err)) } } // Update the hostdb to include the new contracts. 
err := c.hdb.UpdateContracts(c.staticContracts.ViewAll()) if err != nil { - c.log.Println("ERROR: unable to update hostdb contracts:", err) + c.log.Error("unable to update hostdb contracts", zap.Error(err)) } } diff --git a/modules/manager/contractor/contracts.go b/modules/manager/contractor/contracts.go index c7f613f..e5ddf2c 100644 --- a/modules/manager/contractor/contracts.go +++ b/modules/manager/contractor/contracts.go @@ -5,6 +5,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/contractor/contractset" + "go.uber.org/zap" "go.sia.tech/core/types" ) @@ -63,7 +64,7 @@ func (c *Contractor) updatePubKeysToContractIDMap(contracts []modules.RenterCont // the pubKeysToContractID map. for pk, fcid := range uniqueGFU { if c.pubKeysToContractID[pk] != fcid { - c.log.Println("CRITICAL: contractor is not correctly mapping from pubkey to contract id, missing GFU contracts") + c.log.Error("contractor is not correctly mapping from pubkey to contract id, missing GFU contracts") } } } @@ -77,7 +78,7 @@ func (c *Contractor) tryAddContractToPubKeysMap(newContract modules.RenterContra if exists { gfu, gfr := newContract.Utility.GoodForUpload, newContract.Utility.GoodForRenew if gfu || gfr { - c.log.Println("CRITICAL: renewed contract is marked as good for upload or good for renew", gfu, gfr) + c.log.Error("renewed contract is marked as good for upload or good for renew", zap.Bool("GFU", gfu), zap.Bool("GFR", gfr)) } return } @@ -88,7 +89,7 @@ func (c *Contractor) tryAddContractToPubKeysMap(newContract modules.RenterContra if exists { // Sanity check - the contractor should not have multiple contract tips for the // same contract. 
- c.log.Println("ERROR: contractor has multiple contracts that don't form a renewedTo line for the same host and the same renter") + c.log.Error("contractor has multiple contracts that don't form a renewedTo line for the same host and the same renter") } c.pubKeysToContractID[pk] = newContract.ID } diff --git a/modules/manager/contractor/contractset/contract.go b/modules/manager/contractor/contractset/contract.go index 2b943e5..fce0812 100644 --- a/modules/manager/contractor/contractset/contract.go +++ b/modules/manager/contractor/contractset/contract.go @@ -7,6 +7,7 @@ import ( "sync" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" ) @@ -124,17 +125,17 @@ type FileContract struct { func (c *FileContract) EncodeTo(e *types.Encoder) { c.header.Transaction.EncodeTo(e) e.WriteUint64(c.header.StartHeight) - c.header.DownloadSpending.EncodeTo(e) - c.header.FundAccountSpending.EncodeTo(e) - c.header.MaintenanceSpending.AccountBalanceCost.EncodeTo(e) - c.header.MaintenanceSpending.FundAccountCost.EncodeTo(e) - c.header.MaintenanceSpending.UpdatePriceTableCost.EncodeTo(e) - c.header.StorageSpending.EncodeTo(e) - c.header.UploadSpending.EncodeTo(e) - c.header.TotalCost.EncodeTo(e) - c.header.ContractFee.EncodeTo(e) - c.header.TxnFee.EncodeTo(e) - c.header.SiafundFee.EncodeTo(e) + types.V1Currency(c.header.DownloadSpending).EncodeTo(e) + types.V1Currency(c.header.FundAccountSpending).EncodeTo(e) + types.V1Currency(c.header.MaintenanceSpending.AccountBalanceCost).EncodeTo(e) + types.V1Currency(c.header.MaintenanceSpending.FundAccountCost).EncodeTo(e) + types.V1Currency(c.header.MaintenanceSpending.UpdatePriceTableCost).EncodeTo(e) + types.V1Currency(c.header.StorageSpending).EncodeTo(e) + types.V1Currency(c.header.UploadSpending).EncodeTo(e) + types.V1Currency(c.header.TotalCost).EncodeTo(e) + types.V1Currency(c.header.ContractFee).EncodeTo(e) + types.V1Currency(c.header.TxnFee).EncodeTo(e) + 
types.V1Currency(c.header.SiafundFee).EncodeTo(e) e.WriteBool(c.header.Utility.GoodForUpload) e.WriteBool(c.header.Utility.GoodForRenew) e.WriteBool(c.header.Utility.BadContract) @@ -146,17 +147,17 @@ func (c *FileContract) EncodeTo(e *types.Encoder) { func (c *FileContract) DecodeFrom(d *types.Decoder) { c.header.Transaction.DecodeFrom(d) c.header.StartHeight = d.ReadUint64() - c.header.DownloadSpending.DecodeFrom(d) - c.header.FundAccountSpending.DecodeFrom(d) - c.header.MaintenanceSpending.AccountBalanceCost.DecodeFrom(d) - c.header.MaintenanceSpending.FundAccountCost.DecodeFrom(d) - c.header.MaintenanceSpending.UpdatePriceTableCost.DecodeFrom(d) - c.header.StorageSpending.DecodeFrom(d) - c.header.UploadSpending.DecodeFrom(d) - c.header.TotalCost.DecodeFrom(d) - c.header.ContractFee.DecodeFrom(d) - c.header.TxnFee.DecodeFrom(d) - c.header.SiafundFee.DecodeFrom(d) + (*types.V1Currency)(&c.header.DownloadSpending).DecodeFrom(d) + (*types.V1Currency)(&c.header.FundAccountSpending).DecodeFrom(d) + (*types.V1Currency)(&c.header.MaintenanceSpending.AccountBalanceCost).DecodeFrom(d) + (*types.V1Currency)(&c.header.MaintenanceSpending.FundAccountCost).DecodeFrom(d) + (*types.V1Currency)(&c.header.MaintenanceSpending.UpdatePriceTableCost).DecodeFrom(d) + (*types.V1Currency)(&c.header.StorageSpending).DecodeFrom(d) + (*types.V1Currency)(&c.header.UploadSpending).DecodeFrom(d) + (*types.V1Currency)(&c.header.TotalCost).DecodeFrom(d) + (*types.V1Currency)(&c.header.ContractFee).DecodeFrom(d) + (*types.V1Currency)(&c.header.TxnFee).DecodeFrom(d) + (*types.V1Currency)(&c.header.SiafundFee).DecodeFrom(d) c.header.Utility.GoodForUpload = d.ReadBool() c.header.Utility.GoodForRenew = d.ReadBool() c.header.Utility.BadContract = d.ReadBool() @@ -369,7 +370,7 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, rpk types.PublicK // Check if this contract already exists in the set. 
cs.mu.Lock() if _, exists := cs.contracts[fc.header.ID()]; exists { - cs.log.Println("CRITICAL: trying to overwrite existing contract") + cs.log.Error("trying to overwrite existing contract", zap.Stringer("id", fc.header.ID())) } cs.contracts[fc.header.ID()] = fc diff --git a/modules/manager/contractor/contractset/contractset.go b/modules/manager/contractor/contractset/contractset.go index f85ebaa..7d3848e 100644 --- a/modules/manager/contractor/contractset/contractset.go +++ b/modules/manager/contractor/contractset/contractset.go @@ -5,7 +5,7 @@ import ( "sync" "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" + "go.uber.org/zap" "go.sia.tech/core/types" ) @@ -23,7 +23,7 @@ type ContractSet struct { pubKeys map[string]types.FileContractID mu sync.Mutex db *sql.DB - log *persist.Logger + log *zap.Logger } // Acquire looks up the contract for the specified host key and locks it before @@ -58,7 +58,7 @@ func (cs *ContractSet) Delete(c *FileContract) { _, ok := cs.contracts[c.header.ID()] if !ok { cs.mu.Unlock() - cs.log.Println("CRITICAL: delete called on already deleted contract") + cs.log.Error("delete called on already deleted contract", zap.Stringer("id", c.header.ID())) return } id := c.header.ID() @@ -72,7 +72,7 @@ func (cs *ContractSet) Delete(c *FileContract) { func (cs *ContractSet) Erase(fcid types.FileContractID) { err := deleteContract(fcid, cs.db) if err != nil { - cs.log.Println("ERROR: unable to delete the contract:", fcid) + cs.log.Error("unable to delete the contract", zap.Stringer("fcid", fcid)) } } @@ -123,7 +123,7 @@ func (cs *ContractSet) Return(c *FileContract) { _, ok := cs.contracts[c.header.ID()] if !ok { cs.mu.Unlock() - cs.log.Println("CRITICAL: no contract with that key") + cs.log.Error("no contract with that key", zap.Stringer("id", c.header.ID())) return } cs.mu.Unlock() @@ -213,7 +213,7 @@ func (cs *ContractSet) RetireContract(id types.FileContractID) { defer cs.mu.Unlock() c, exists := 
cs.contracts[id] if !exists { - cs.log.Println("ERROR: trying to retire a non-existing contract") + cs.log.Error("trying to retire a non-existing contract", zap.Stringer("id", id)) return } cs.oldContracts[id] = c @@ -226,19 +226,19 @@ func (cs *ContractSet) UnlockPayout(id types.FileContractID) { if !exists { c, exists = cs.oldContracts[id] if !exists { - cs.log.Println("ERROR: contract not found:", id) + cs.log.Error("contract not found", zap.Stringer("id", id)) return } } err := c.unlockPayout() if err != nil { - cs.log.Println("ERROR: couldn't unlock contract payout:", err) + cs.log.Error("couldn't unlock contract payout", zap.Error(err)) } } // NewContractSet returns a ContractSet storing its contracts in the specified // database. -func NewContractSet(db *sql.DB, log *persist.Logger, height uint64) (*ContractSet, error) { +func NewContractSet(db *sql.DB, log *zap.Logger, height uint64) (*ContractSet, error) { cs := &ContractSet{ contracts: make(map[types.FileContractID]*FileContract), oldContracts: make(map[types.FileContractID]*FileContract), diff --git a/modules/manager/contractor/contractset/database.go b/modules/manager/contractor/contractset/database.go index 1b85e11..9d9f7f0 100644 --- a/modules/manager/contractor/contractset/database.go +++ b/modules/manager/contractor/contractset/database.go @@ -8,6 +8,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "go.sia.tech/core/types" + "go.uber.org/zap" ) // saveContract saves the FileContract in the database. 
A lock must be acquired @@ -72,7 +73,7 @@ func (cs *ContractSet) loadContracts(height uint64) error { var fcBytes []byte for rows.Next() { if err := rows.Scan(&id, &renewed, &unlocked, &imported, &fcBytes); err != nil { - cs.log.Println("ERROR: unable to load file contract:", err) + cs.log.Error("unable to load file contract", zap.Error(err)) continue } @@ -81,7 +82,7 @@ func (cs *ContractSet) loadContracts(height uint64) error { fc := &FileContract{db: cs.db} fc.DecodeFrom(d) if err := d.Err(); err != nil { - cs.log.Println("ERROR: unable to decode file contract:", err) + cs.log.Error("unable to decode file contract", zap.Error(err)) continue } copy(fcid[:], id) @@ -112,7 +113,7 @@ func (cs *ContractSet) managedFindIDs(rpk types.PublicKey) []types.FileContractI WHERE renter_pk = ? `, rpk[:]) if err != nil { - cs.log.Println("ERROR: couldn't query database:", err) + cs.log.Error("couldn't query database", zap.Error(err)) return nil } defer rows.Close() @@ -121,7 +122,7 @@ func (cs *ContractSet) managedFindIDs(rpk types.PublicKey) []types.FileContractI for rows.Next() { id := make([]byte, 32) if err := rows.Scan(&id); err != nil { - cs.log.Println("ERROR: unable to get contract ID:", err) + cs.log.Error("unable to get contract ID", zap.Error(err)) continue } var fcid types.FileContractID diff --git a/modules/manager/contractor/database.go b/modules/manager/contractor/database.go index cd059c4..faf4c16 100644 --- a/modules/manager/contractor/database.go +++ b/modules/manager/contractor/database.go @@ -4,6 +4,7 @@ import ( "bytes" "database/sql" "errors" + "fmt" "io" "os" "path/filepath" @@ -11,6 +12,7 @@ import ( "github.com/mike76-dev/sia-satellite/internal/object" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "lukechampine.com/frand" rhpv2 "go.sia.tech/core/rhp/v2" @@ -40,9 +42,9 @@ func (c *Contractor) initDB() error { return nil } _, err = c.db.Exec(` - INSERT INTO ctr_info (height, last_change, synced) + INSERT INTO ctr_info (height, bid, 
synced) VALUES (?, ?, ?) - `, 0, modules.ConsensusChangeBeginning[:], false) + `, 0, []byte{}, false) return err } @@ -52,7 +54,7 @@ func (c *Contractor) loadState() error { var height uint64 var synced bool err := c.db.QueryRow(` - SELECT height, last_change, synced + SELECT height, bid, synced FROM ctr_info WHERE id = 1 `).Scan(&height, &cc, &synced) @@ -60,8 +62,8 @@ func (c *Contractor) loadState() error { return err } - c.blockHeight = height - copy(c.lastChange[:], cc) + c.tip.Height = height + copy(c.tip.ID[:], cc) c.synced = make(chan struct{}) if synced { close(c.synced) @@ -80,9 +82,9 @@ func (c *Contractor) updateState() error { } _, err := c.db.Exec(` UPDATE ctr_info - SET height = ?, last_change = ?, synced = ? + SET height = ?, bid = ?, synced = ? WHERE id = 1 - `, c.blockHeight, c.lastChange[:], synced) + `, c.tip.Height, c.tip.ID[:], synced) return err } @@ -173,7 +175,7 @@ func (c *Contractor) updateDoubleSpent() error { _, err = tx.Exec("DELETE FROM ctr_dspent") if err != nil { - c.log.Println("ERROR: couldn't clear double-spent contracts:", err) + c.log.Error("couldn't clear double-spent contracts", zap.Error(err)) tx.Rollback() return err } @@ -181,7 +183,7 @@ func (c *Contractor) updateDoubleSpent() error { for id, height := range c.doubleSpentContracts { _, err := tx.Exec("INSERT INTO ctr_dspent (id, height) VALUES (?, ?)", id[:], height) if err != nil { - c.log.Println("ERROR: couldn't update double-spent contracts:", err) + c.log.Error("couldn't update double-spent contracts", zap.Error(err)) tx.Rollback() return err } @@ -204,7 +206,7 @@ func (c *Contractor) loadRenewHistory() error { var fcid, fcidNew, fcidOld types.FileContractID for rows.Next() { if err := rows.Scan(&id, &from, &to); err != nil { - c.log.Println("Error scanning database row:", err) + c.log.Error("error scanning database row", zap.Error(err)) continue } copy(fcid[:], id) @@ -232,7 +234,7 @@ func (c *Contractor) loadRenters() error { FROM ctr_renters `) if err != nil { - 
c.log.Println("ERROR: could not load the renters:", err) + c.log.Error("could not load the renters", zap.Error(err)) return err } defer rows.Close() @@ -257,7 +259,7 @@ func (c *Contractor) loadRenters() error { &autoRepair, &proxyUploads, ); err != nil { - c.log.Println("ERROR: could not load the renter:", err) + c.log.Error("could not load the renter", zap.Error(err)) continue } @@ -293,13 +295,13 @@ func (c *Contractor) loadRenters() error { func (w *watchdog) save() error { tx, err := w.contractor.db.Begin() if err != nil { - w.contractor.log.Println("ERROR: couldn't save watchdog:", err) + w.contractor.log.Error("couldn't save watchdog", zap.Error(err)) return err } _, err = tx.Exec("DELETE FROM ctr_watchdog") if err != nil { - w.contractor.log.Println("ERROR: couldn't clear watchdog data:", err) + w.contractor.log.Error("couldn't clear watchdog data", zap.Error(err)) tx.Rollback() return err } @@ -311,7 +313,7 @@ func (w *watchdog) save() error { e.Flush() _, err := tx.Exec("INSERT INTO ctr_watchdog (id, bytes) VALUES (?, ?)", id[:], buf.Bytes()) if err != nil { - w.contractor.log.Println("ERROR: couldn't save watchdog:", err) + w.contractor.log.Error("couldn't save watchdog", zap.Error(err)) tx.Rollback() return err } @@ -324,13 +326,13 @@ func (w *watchdog) save() error { func (w *watchdog) load() error { tx, err := w.contractor.db.Begin() if err != nil { - w.contractor.log.Println("ERROR: couldn't load watchdog:", err) + w.contractor.log.Error("couldn't load watchdog", zap.Error(err)) return err } rows, err := tx.Query("SELECT id, bytes FROM ctr_watchdog") if err != nil && !errors.Is(err, sql.ErrNoRows) { - w.contractor.log.Println("ERROR: couldn't load watchdog:", err) + w.contractor.log.Error("couldn't load watchdog", zap.Error(err)) tx.Rollback() return err } @@ -340,7 +342,7 @@ func (w *watchdog) load() error { id := make([]byte, 32) var fcsBytes []byte if err := rows.Scan(&id, &fcsBytes); err != nil { - w.contractor.log.Println("ERROR: couldn't load 
watchdog:", err) + w.contractor.log.Error("couldn't load watchdog", zap.Error(err)) rows.Close() tx.Rollback() return err @@ -351,7 +353,7 @@ func (w *watchdog) load() error { d := types.NewDecoder(io.LimitedReader{R: buf, N: int64(len(fcsBytes))}) fcs.DecodeFrom(d) if err := d.Err(); err != nil { - w.contractor.log.Println("ERROR: couldn't load watchdog:", err) + w.contractor.log.Error("couldn't load watchdog", zap.Error(err)) rows.Close() tx.Rollback() return err @@ -378,7 +380,7 @@ func (w *watchdog) load() error { func (c *Contractor) DeleteMetadata(pk types.PublicKey) error { tx, err := c.db.Begin() if err != nil { - c.log.Println("ERROR: unable to start transaction:", err) + c.log.Error("unable to start transaction", zap.Error(err)) return modules.AddContext(err, "unable to start transaction") } @@ -386,7 +388,7 @@ func (c *Contractor) DeleteMetadata(pk types.PublicKey) error { var slabs []types.Hash256 rows, err := tx.Query("SELECT enc_key FROM ctr_slabs WHERE renter_pk = ?", pk[:]) if err != nil { - c.log.Println("ERROR: unable to query slabs:", err) + c.log.Error("unable to query slabs", zap.Error(err)) tx.Rollback() return modules.AddContext(err, "unable to query slabs") } @@ -395,7 +397,7 @@ func (c *Contractor) DeleteMetadata(pk types.PublicKey) error { s := make([]byte, 32) if err := rows.Scan(&s); err != nil { rows.Close() - c.log.Println("ERROR: unable to load slab ID:", err) + c.log.Error("unable to load slab ID", zap.Error(err)) tx.Rollback() return modules.AddContext(err, "unable to load slab ID") } @@ -409,7 +411,7 @@ func (c *Contractor) DeleteMetadata(pk types.PublicKey) error { for _, slab := range slabs { _, err := tx.Exec("DELETE FROM ctr_shards WHERE slab_id = ?", slab[:]) if err != nil { - c.log.Println("ERROR: unable to delete shards:", err) + c.log.Error("unable to delete shards", zap.Error(err)) continue } } @@ -417,7 +419,7 @@ func (c *Contractor) DeleteMetadata(pk types.PublicKey) error { // Delete slabs. 
_, err = tx.Exec("DELETE FROM ctr_slabs WHERE renter_pk = ?", pk[:]) if err != nil { - c.log.Println("ERROR: unable to delete slabs:", err) + c.log.Error("unable to delete slabs", zap.Error(err)) tx.Rollback() return modules.AddContext(err, "unable to delete slabs") } @@ -425,14 +427,14 @@ func (c *Contractor) DeleteMetadata(pk types.PublicKey) error { // Delete objects. _, err = tx.Exec("DELETE FROM ctr_metadata WHERE renter_pk = ?", pk[:]) if err != nil { - c.log.Println("ERROR: unable to delete metadata:", err) + c.log.Error("unable to delete metadata", zap.Error(err)) tx.Rollback() return modules.AddContext(err, "unable to delete metadata") } err = tx.Commit() if err != nil { - c.log.Println("ERROR: unable to commit transaction:", err) + c.log.Error("unable to commit transaction", zap.Error(err)) return modules.AddContext(err, "unable to commit transaction") } @@ -1509,20 +1511,20 @@ func (c *Contractor) uploadPackedSlabs(rpk types.PublicKey) error { func (c *Contractor) managedUploadBufferedFiles() { // Skip if a satellite maintenance is running. if c.m.Maintenance() { - c.log.Println("INFO: skipping file uploads because satellite maintenance is running") + c.log.Info("skipping file uploads because satellite maintenance is running") return } // No file uploads unless contractor is synced. 
if !c.managedSynced() { - c.log.Println("INFO: skipping file uploads since consensus isn't synced yet") + c.log.Info("skipping file uploads since consensus isn't synced yet") return } c.mu.Lock() if c.uploadingBufferedFiles { c.mu.Unlock() - c.log.Println("INFO: skipping file uploads since another thread is running already") + c.log.Info("skipping file uploads since another thread is running already") return } c.uploadingBufferedFiles = true @@ -1533,7 +1535,7 @@ func (c *Contractor) managedUploadBufferedFiles() { c.mu.Unlock() pending, err := c.uploadPending() if err != nil { - c.log.Println("ERROR: couldn't check files pending upload") + c.log.Error("couldn't check files pending upload", zap.Error(err)) return } if pending { @@ -1541,7 +1543,7 @@ func (c *Contractor) managedUploadBufferedFiles() { } }() - c.log.Println("INFO: uploading buffered files") + c.log.Info("uploading buffered files") // Sort the files by the upload timestamp, the older come first. rows, err := c.db.Query(` @@ -1551,7 +1553,7 @@ func (c *Contractor) managedUploadBufferedFiles() { ORDER BY filename ASC `) if err != nil { - c.log.Println("ERROR: couldn't query buffered files:", err) + c.log.Error("couldn't query buffered files", zap.Error(err)) return } defer rows.Close() @@ -1567,7 +1569,7 @@ func (c *Contractor) managedUploadBufferedFiles() { var bucket, path, mimeType []byte if err := rows.Scan(&n, &bucket, &path, &mimeType, &pk, &encrypted); err != nil { rows.Close() - c.log.Println("ERROR: couldn't scan file record:", err) + c.log.Error("couldn't scan file record", zap.Error(err)) return } var rpk types.PublicKey @@ -1578,7 +1580,7 @@ func (c *Contractor) managedUploadBufferedFiles() { name := filepath.Join(c.m.BufferedFilesDir(), n) file, err := os.Open(name) if err != nil { - c.log.Println("ERROR: couldn't open file:", err) + c.log.Error("couldn't open file", zap.Error(err)) return err } defer func() { @@ -1586,7 +1588,7 @@ func (c *Contractor) managedUploadBufferedFiles() { if err 
== nil { err = os.Remove(name) if err != nil { - c.log.Println("ERROR: couldn't delete file:", err) + c.log.Error("couldn't delete file", zap.Error(err)) return } _, err = c.db.Exec(` @@ -1597,7 +1599,7 @@ func (c *Contractor) managedUploadBufferedFiles() { AND filepath = ? `, pk, n, bucket, path) if err != nil { - c.log.Println("ERROR: couldn't delete file record:", err) + c.log.Error("couldn't delete file record", zap.Error(err)) return } } @@ -1606,19 +1608,19 @@ func (c *Contractor) managedUploadBufferedFiles() { // Upload the data. fm, err := c.managedUploadObject(file, rpk, bucket, path, mimeType, encrypted) if err != nil { - c.log.Println("ERROR: couldn't upload object:", err) + c.log.Error("couldn't upload object", zap.Error(err)) return err } // Store the object in the database. if err := c.updateMetadata(rpk, fm, false); err != nil { - c.log.Println("ERROR: couldn't save object:", err) + c.log.Error("couldn't save object", zap.Error(err)) return err } // Upload any complete slabs. 
if err := c.uploadPackedSlabs(rpk); err != nil { - c.log.Println("ERROR: couldn't upload packed slabs:", err) + c.log.Error("couldn't upload packed slabs", zap.Error(err)) return err } @@ -1653,7 +1655,7 @@ func (c *Contractor) threadedUploadBufferedFiles() { func (c *Contractor) managedPruneOrphanedSlabs() { tx, err := c.db.Begin() if err != nil { - c.log.Println("ERROR: unable to start transaction:", err) + c.log.Error("unable to start transaction", zap.Error(err)) return } @@ -1672,7 +1674,7 @@ func (c *Contractor) managedPruneOrphanedSlabs() { ) `, uint64(time.Now().Add(-orphanedSlabPruneThreshold).Unix())) if err != nil { - c.log.Println("ERROR: unable to query slabs:", err) + c.log.Error("unable to query slabs", zap.Error(err)) tx.Rollback() return } @@ -1681,7 +1683,7 @@ func (c *Contractor) managedPruneOrphanedSlabs() { for rows.Next() { key := make([]byte, 32) if err := rows.Scan(&key); err != nil { - c.log.Println("ERROR: unable to get slab ID:", err) + c.log.Error("unable to get slab ID", zap.Error(err)) rows.Close() tx.Rollback() return @@ -1696,7 +1698,7 @@ func (c *Contractor) managedPruneOrphanedSlabs() { for _, id := range ids { _, err := tx.Exec("DELETE FROM ctr_shards WHERE slab_id = ?", id[:]) if err != nil { - c.log.Println("ERROR: unable to delete shards:", err) + c.log.Error("unable to delete shards", zap.Error(err)) tx.Rollback() return } @@ -1705,13 +1707,13 @@ func (c *Contractor) managedPruneOrphanedSlabs() { // Delete the slabs. res, err := tx.Exec("DELETE FROM ctr_slabs WHERE orphan = TRUE") if err != nil { - c.log.Println("ERROR: unable to delete orphaned slabs:", err) + c.log.Error("unable to delete orphaned slabs", zap.Error(err)) tx.Rollback() return } num, _ := res.RowsAffected() if num > 0 { - c.log.Printf("INFO: deleted %d orphaned slabs\n", num) + c.log.Info(fmt.Sprintf("deleted %d orphaned slabs", num)) } // Delete orphaned shards. 
@@ -1723,17 +1725,17 @@ func (c *Contractor) managedPruneOrphanedSlabs() { ) `) if err != nil { - c.log.Println("ERROR: unable to delete orphaned shards:", err) + c.log.Error("unable to delete orphaned shards", zap.Error(err)) tx.Rollback() return } num, _ = res.RowsAffected() if num > 0 { - c.log.Printf("INFO: deleted %d orphaned shards\n", num) + c.log.Info(fmt.Sprintf("deleted %d orphaned shards", num)) } if err := tx.Commit(); err != nil { - c.log.Println("ERROR: unable to commit transaction:", err) + c.log.Error("unable to commit transaction", zap.Error(err)) } } diff --git a/modules/manager/contractor/download.go b/modules/manager/contractor/download.go index e35f457..0887b16 100644 --- a/modules/manager/contractor/download.go +++ b/modules/manager/contractor/download.go @@ -16,6 +16,7 @@ import ( "github.com/mike76-dev/sia-satellite/internal/object" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/proto" + "go.uber.org/zap" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" @@ -345,7 +346,7 @@ outer: atomic.AddUint64(&concurrentSlabs, ^uint64(0)) if resp.err != nil { - mgr.contractor.log.Printf("ERROR: download slab %v failed: %v\n", resp.index, resp.err) + mgr.contractor.log.Error(fmt.Sprintf("download slab %v failed", resp.index), zap.Error(resp.err)) return resp.err } @@ -357,7 +358,7 @@ outer: // Partial slab. 
_, err = cw.Write(s.Data) if err != nil { - mgr.contractor.log.Printf("failed to send partial slab %v: %v\n", respIndex, err) + mgr.contractor.log.Error(fmt.Sprintf("failed to send partial slab %v", respIndex), zap.Error(err)) return err } } else { @@ -365,7 +366,7 @@ outer: slabs[respIndex].Decrypt(next.shards) err := slabs[respIndex].Recover(cw, next.shards) if err != nil { - mgr.contractor.log.Printf("failed to recover slab %v: %v\n", respIndex, err) + mgr.contractor.log.Error(fmt.Sprintf("failed to recover slab %v", respIndex), zap.Error(err)) return err } } diff --git a/modules/manager/contractor/form.go b/modules/manager/contractor/form.go index 66cdbe8..d35aaba 100644 --- a/modules/manager/contractor/form.go +++ b/modules/manager/contractor/form.go @@ -9,6 +9,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/proto" + "go.uber.org/zap" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" @@ -38,7 +39,7 @@ func fundsToExpectedStorage(funds types.Currency, duration uint64, hostSettings // transaction set. func (c *Contractor) prepareContractFormation(rpk types.PublicKey, host modules.HostDBEntry, contractFunding, hostCollateral types.Currency, endHeight uint64, address types.Address) ([]types.Transaction, types.Transaction, []types.Transaction, types.Currency, types.Currency, types.Currency, error) { c.mu.RLock() - blockHeight := c.blockHeight + blockHeight := c.tip.Height c.mu.RUnlock() // Prepare contract and transaction. @@ -48,20 +49,22 @@ func (c *Contractor) prepareContractFormation(rpk types.PublicKey, host modules. 
txn := types.Transaction{ FileContracts: []types.FileContract{fc}, } - _, txnFee := c.tpool.FeeEstimation() - minerFee := txnFee.Mul64(modules.EstimatedFileContractTransactionSetSize) + txnFee := c.cm.RecommendedFee() + minerFee := txnFee.Mul64(2048) txn.MinerFees = []types.Currency{minerFee} totalCost := cost.Add(minerFee).Add(tax) - parentTxn, toSign, err := c.wallet.FundTransaction(&txn, totalCost) + parents, toSign, err := c.wallet.Fund(&txn, totalCost) if err != nil { - c.wallet.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) return nil, types.Transaction{}, nil, types.ZeroCurrency, types.ZeroCurrency, types.ZeroCurrency, modules.AddContext(err, "unable to fund transaction") } // Make a copy of the transactions to be used to by the watchdog // to double spend these inputs in case the contract never appears on chain. sweepTxn := modules.CopyTransaction(txn) - sweepParents := []types.Transaction{modules.CopyTransaction(parentTxn)} + var sweepParents []types.Transaction + for _, parent := range parents { + sweepParents = append(sweepParents, modules.CopyTransaction(parent)) + } // Add an output that sends all funds back to the Satellite address. output := types.SiacoinOutput{ @@ -71,14 +74,21 @@ func (c *Contractor) prepareContractFormation(rpk types.PublicKey, host modules. sweepTxn.SiacoinOutputs = append(sweepTxn.SiacoinOutputs, output) // Sign the transaction. 
- cf := modules.ExplicitCoveredFields(txn) - err = c.wallet.Sign(&txn, toSign, cf) + for i, id := range toSign { + txn.Signatures = append(txn.Signatures, types.TransactionSignature{ + ParentID: id, + CoveredFields: types.CoveredFields{ + SiacoinInputs: []uint64{uint64(i)}, + }, + }) + } + err = c.wallet.Sign(c.cm.TipState(), &txn, toSign) if err != nil { - c.wallet.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) + c.wallet.Release(append(parents, txn)) return nil, types.Transaction{}, nil, types.ZeroCurrency, types.ZeroCurrency, types.ZeroCurrency, modules.AddContext(err, "unable to sign transaction") } - return append([]types.Transaction{parentTxn}, txn), sweepTxn, sweepParents, totalCost, minerFee, tax, nil + return append(parents, txn), sweepTxn, sweepParents, totalCost, minerFee, tax, nil } // managedNewContract negotiates an initial file contract with the specified @@ -87,7 +97,7 @@ func (c *Contractor) managedNewContract(rpk types.PublicKey, rsk types.PrivateKe // Check if we know this renter. c.mu.RLock() renter, exists := c.renters[rpk] - blockHeight := c.blockHeight + blockHeight := c.tip.Height c.mu.RUnlock() if !exists { return types.ZeroCurrency, modules.RenterContract{}, ErrRenterNotFound @@ -140,7 +150,7 @@ func (c *Contractor) managedNewContract(rpk types.PublicKey, rsk types.PrivateKe hostSettings.NetAddress = host.Settings.NetAddress // Check if the host is gouging. 
- _, txnFee := c.tpool.FeeEstimation() + txnFee := c.cm.RecommendedFee() if err := modules.CheckGouging(renter.Allowance, blockHeight, &hostSettings, nil, txnFee); err != nil { hostFault = true return modules.AddContext(err, "host is gouging") @@ -175,7 +185,7 @@ func (c *Contractor) managedNewContract(rpk types.PublicKey, rsk types.PrivateKe rev, txnSet, err = proto.RPCFormContract(ctx, t, esk, renterTxnSet) if err != nil { hostFault = true - c.wallet.ReleaseInputs(renterTxnSet) + c.wallet.Release(renterTxnSet) return modules.AddContext(err, "couldn't form contract") } @@ -186,15 +196,13 @@ func (c *Contractor) managedNewContract(rpk types.PublicKey, rsk types.PrivateKe } // Submit to blockchain. - err = c.tpool.AcceptTransactionSet(txnSet) - if modules.ContainsError(err, errDuplicateTransactionSet) { - // As long as it made it into the transaction pool, we're good. - err = nil - } + _, err = c.cm.AddPoolTransactions(txnSet) if err != nil { - c.wallet.ReleaseInputs(txnSet) + c.wallet.Release(txnSet) + c.log.Error("couldn't submit transaction set to the pool", zap.Error(err)) return types.ZeroCurrency, modules.RenterContract{}, err } + c.s.BroadcastTransactionSet(txnSet) // Add contract to the set. revisionTxn := types.Transaction{ @@ -203,7 +211,7 @@ func (c *Contractor) managedNewContract(rpk types.PublicKey, rsk types.PrivateKe } contract, err := c.staticContracts.InsertContract(revisionTxn, blockHeight, totalCost, contractPrice, minerFee, siafundFee, rpk, false) if err != nil { - c.log.Println("ERROR: couldn't add the new contract to the contract set:", err) + c.log.Error("couldn't add the new contract to the contract set", zap.Error(err)) return types.ZeroCurrency, modules.RenterContract{}, err } @@ -229,18 +237,18 @@ func (c *Contractor) managedNewContract(rpk types.PublicKey, rsk types.PrivateKe c.mu.Unlock() // We need to return a funding value because money was spent on this // host, even though the full process could not be completed. 
- c.log.Println("WARN: attempted to form a new contract with a host that this renter already has a contract with.") + c.log.Warn("attempted to form a new contract with a host that this renter already has a contract with") return contractFunding, modules.RenterContract{}, fmt.Errorf("%v already has a contract with host %v", contract.RenterPublicKey, contract.HostPublicKey) } c.pubKeysToContractID[contract.RenterPublicKey.String()+contract.HostPublicKey.String()] = contract.ID c.mu.Unlock() - c.log.Printf("INFO: formed contract %v with %v for %v\n", contract.ID, host.Settings.NetAddress, contract.RenterFunds) + c.log.Info("formed new contract", zap.Stringer("id", contract.ID), zap.String("host", host.Settings.NetAddress), zap.Stringer("amount", contract.RenterFunds)) // Update the hostdb to include the new contract. err = c.hdb.UpdateContracts(c.staticContracts.ViewAll()) if err != nil { - c.log.Println("ERROR: unable to update hostdb contracts:", err) + c.log.Error("unable to update hostdb contracts", zap.Error(err)) } return contractFunding, contract, nil } @@ -256,22 +264,12 @@ func (c *Contractor) FormContracts(rpk types.PublicKey, rsk types.PrivateKey) ([ // Check if we know this renter. c.mu.RLock() renter, exists := c.renters[rpk] - blockHeight := c.blockHeight + blockHeight := c.tip.Height c.mu.RUnlock() if !exists { return nil, ErrRenterNotFound } - // Register or unregister and alerts related to contract formation. - var registerLowFundsAlert bool - defer func() { - if registerLowFundsAlert { - c.staticAlerter.RegisterAlert(modules.AlertIDRenterAllowanceLowFunds, AlertMSGAllowanceLowFunds, AlertCauseInsufficientAllowanceFunds, modules.SeverityWarning) - } else { - c.staticAlerter.UnregisterAlert(modules.AlertIDRenterAllowanceLowFunds) - } - }() - // Check if the renter has enough contracts according to their allowance. 
fundsRemaining := renter.Allowance.Funds numHosts := renter.Allowance.Hosts @@ -310,8 +308,8 @@ func (c *Contractor) FormContracts(rpk types.PublicKey, rsk types.PrivateKey) ([ } // Calculate the anticipated transaction fee. - _, maxFee := c.tpool.FeeEstimation() - txnFee := maxFee.Mul64(modules.EstimatedFileContractTransactionSetSize) + fee := c.cm.RecommendedFee() + txnFee := fee.Mul64(2048) // Form contracts with the hosts one at a time, until we have enough contracts. for _, host := range hosts { @@ -330,14 +328,14 @@ func (c *Contractor) FormContracts(rpk types.PublicKey, rsk types.PrivateKey) ([ // Fetch the price table. pt, err := proto.FetchPriceTable(host) if err != nil { - c.log.Printf("WARN: unable to fetch price table from %s: %v", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("unable to fetch price table from %s", host.Settings.NetAddress), zap.Error(err)) continue } // Check if the host is gouging. err = modules.CheckGouging(renter.Allowance, blockHeight, nil, &pt, txnFee) if err != nil { - c.log.Printf("WARN: gouging detected at %s: %v\n", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("gouging detected at %s", host.Settings.NetAddress), zap.Error(err)) continue } @@ -356,23 +354,16 @@ func (c *Contractor) FormContracts(rpk types.PublicKey, rsk types.PrivateKey) ([ contractFunds = minInitialContractFunds } - // Confirm that the wallet is unlocked. - unlocked, err := c.wallet.Unlocked() - if !unlocked || err != nil { - return nil, errors.New("the wallet is locked") - } - // Determine if we have enough money to form a new contract. if fundsRemaining.Cmp(contractFunds) < 0 { - registerLowFundsAlert = true - c.log.Println("WARN: need to form new contracts, but unable to because of a low allowance") + c.log.Warn("need to form new contracts, but unable to because of a low allowance", zap.String("renter", renter.Email)) break } // Attempt forming a contract with this host. 
fundsSpent, newContract, err := c.managedNewContract(rpk, rsk, host, contractFunds, endHeight) if err != nil { - c.log.Printf("WARN: attempted to form a contract with %v, but negotiation failed: %v\n", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("attempted to form a contract with %v, but negotiation failed", host.Settings.NetAddress), zap.Error(err)) continue } fundsRemaining = fundsRemaining.Sub(fundsSpent) @@ -384,13 +375,13 @@ func (c *Contractor) FormContracts(rpk types.PublicKey, rsk types.PrivateKey) ([ amount := funds / hastings err = c.m.LockSiacoins(renter.Email, amount) if err != nil { - c.log.Println("ERROR: couldn't lock funds:", err) + c.log.Error("couldn't lock funds", zap.Error(err)) } // Increment the number of formations in the database. err = c.m.IncrementStats(renter.Email, false) if err != nil { - c.log.Println("ERROR: couldn't update stats") + c.log.Error("couldn't update stats", zap.Error(err)) } // Add this contract to the contractor and save. @@ -400,14 +391,14 @@ func (c *Contractor) FormContracts(rpk types.PublicKey, rsk types.PrivateKey) ([ GoodForRenew: true, }) if err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) continue } c.mu.Lock() err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("ERROR: unable to save the contractor:", err) + c.log.Error("unable to save the contractor", zap.Error(err)) } } @@ -420,7 +411,7 @@ func (c *Contractor) managedTrustlessNewContract(s *modules.RPCSession, rpk, epk // Check if we know this renter. 
c.mu.RLock() renter, exists := c.renters[rpk] - blockHeight := c.blockHeight + blockHeight := c.tip.Height c.mu.RUnlock() if !exists { return types.ZeroCurrency, modules.RenterContract{}, ErrRenterNotFound @@ -473,7 +464,7 @@ func (c *Contractor) managedTrustlessNewContract(s *modules.RPCSession, rpk, epk hostSettings.NetAddress = host.Settings.NetAddress // Check if the host is gouging. - _, txnFee := c.tpool.FeeEstimation() + txnFee := c.cm.RecommendedFee() if err := modules.CheckGouging(renter.Allowance, blockHeight, &hostSettings, nil, txnFee); err != nil { hostFault = true return modules.AddContext(err, "host is gouging") @@ -504,7 +495,7 @@ func (c *Contractor) managedTrustlessNewContract(s *modules.RPCSession, rpk, epk rev, txnSet, err = proto.RPCTrustlessFormContract(ctx, t, s, epk, renterTxnSet) if err != nil { hostFault = true - c.wallet.ReleaseInputs(renterTxnSet) + c.wallet.Release(renterTxnSet) return modules.AddContext(err, "couldn't form contract") } @@ -515,15 +506,13 @@ func (c *Contractor) managedTrustlessNewContract(s *modules.RPCSession, rpk, epk } // Submit to blockchain. - err = c.tpool.AcceptTransactionSet(txnSet) - if modules.ContainsError(err, errDuplicateTransactionSet) { - // As long as it made it into the transaction pool, we're good. - err = nil - } + _, err = c.cm.AddPoolTransactions(txnSet) if err != nil { - c.wallet.ReleaseInputs(txnSet) + c.wallet.Release(txnSet) + c.log.Error("couldn't submit transaction set to the pool", zap.Error(err)) return types.ZeroCurrency, modules.RenterContract{}, err } + c.s.BroadcastTransactionSet(txnSet) // Add contract to the set. 
revisionTxn := types.Transaction{ @@ -532,7 +521,7 @@ func (c *Contractor) managedTrustlessNewContract(s *modules.RPCSession, rpk, epk } contract, err := c.staticContracts.InsertContract(revisionTxn, blockHeight, totalCost, contractPrice, minerFee, siafundFee, rpk, false) if err != nil { - c.log.Println("ERROR: couldn't add the new contract to the contract set:", err) + c.log.Error("couldn't add the new contract to the contract set", zap.Error(err)) return types.ZeroCurrency, modules.RenterContract{}, err } @@ -558,18 +547,18 @@ func (c *Contractor) managedTrustlessNewContract(s *modules.RPCSession, rpk, epk c.mu.Unlock() // We need to return a funding value because money was spent on this // host, even though the full process could not be completed. - c.log.Println("WARN: attempted to form a new contract with a host that this renter already has a contract with.") + c.log.Warn("attempted to form a new contract with a host that this renter already has a contract with") return contractFunding, modules.RenterContract{}, fmt.Errorf("%v already has a contract with host %v", contract.RenterPublicKey, contract.HostPublicKey) } c.pubKeysToContractID[contract.RenterPublicKey.String()+contract.HostPublicKey.String()] = contract.ID c.mu.Unlock() - c.log.Printf("INFO: formed contract %v with %v for %v\n", contract.ID, host.Settings.NetAddress, contract.RenterFunds) + c.log.Info("formed new contract", zap.Stringer("id", contract.ID), zap.String("host", host.Settings.NetAddress), zap.Stringer("amount", contract.RenterFunds)) // Update the hostdb to include the new contract. err = c.hdb.UpdateContracts(c.staticContracts.ViewAll()) if err != nil { - c.log.Println("ERROR: unable to update hostdb contracts:", err) + c.log.Error("unable to update hostdb contracts", zap.Error(err)) } return contractFunding, contract, nil } @@ -601,8 +590,8 @@ func (c *Contractor) FormContract(s *modules.RPCSession, rpk, epk, hpk types.Pub } // Calculate the anticipated transaction fee. 
- _, maxFee := c.tpool.FeeEstimation() - txnFee := maxFee.Mul64(modules.EstimatedFileContractTransactionSetSize) + fee := c.cm.RecommendedFee() + txnFee := fee.Mul64(2048) // Calculate the contract funding with the host. contractFunds := host.Settings.ContractPrice.Add(txnFee).Mul64(ContractFeeFundingMulFactor) @@ -616,16 +605,10 @@ func (c *Contractor) FormContract(s *modules.RPCSession, rpk, epk, hpk types.Pub contractFunds = minInitialContractFunds } - // Confirm that the wallet is unlocked. - unlocked, err := c.wallet.Unlocked() - if !unlocked || err != nil { - return modules.RenterContract{}, errors.New("the wallet is locked") - } - // Attempt forming a contract with this host. fundsSpent, newContract, err := c.managedTrustlessNewContract(s, rpk, epk, host, contractFunds, endHeight) if err != nil { - c.log.Printf("WARN: attempted to form a contract with %v, but negotiation failed: %v\n", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("attempted to form a contract with %v, but negotiation failed", host.Settings.NetAddress), zap.Error(err)) return modules.RenterContract{}, err } @@ -635,13 +618,13 @@ func (c *Contractor) FormContract(s *modules.RPCSession, rpk, epk, hpk types.Pub amount := funds / hastings err = c.m.LockSiacoins(renter.Email, amount) if err != nil { - c.log.Println("ERROR: couldn't lock funds:", err) + c.log.Error("couldn't lock funds", zap.Error(err)) } // Increment the number of formations in the database. err = c.m.IncrementStats(renter.Email, false) if err != nil { - c.log.Println("ERROR: couldn't update stats") + c.log.Error("couldn't update stats", zap.Error(err)) } // Add this contract to the contractor and save. 
@@ -650,14 +633,14 @@ func (c *Contractor) FormContract(s *modules.RPCSession, rpk, epk, hpk types.Pub GoodForRenew: true, }) if err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) return modules.RenterContract{}, err } c.mu.Lock() err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("ERROR: unable to save the contractor:", err) + c.log.Error("unable to save the contractor", zap.Error(err)) } return newContract, nil diff --git a/modules/manager/contractor/maintenancechecks.go b/modules/manager/contractor/maintenancechecks.go index 3db14d9..7063b84 100644 --- a/modules/manager/contractor/maintenancechecks.go +++ b/modules/manager/contractor/maintenancechecks.go @@ -1,13 +1,16 @@ package contractor import ( + "fmt" "math" "math/big" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/contractor/contractset" "github.com/mike76-dev/sia-satellite/modules/manager/proto" + "go.uber.org/zap" + rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" ) @@ -70,16 +73,16 @@ func (c *Contractor) managedCheckHostScore(contract modules.RenterContract, sb m if deadScore || badScore { // Log if the utility has changed. 
if u.GoodForUpload || u.GoodForRenew { - c.log.Printf("INFO: marking contract as having no utility because of host score: %v\n", contract.ID) - c.log.Println("Min Score:", minScoreGFR) - c.log.Println("Score: ", sb.Score) - c.log.Println("Age Adjustment: ", sb.Age) - c.log.Println("Collateral Adjustment: ", sb.Collateral) - c.log.Println("Interaction Adjustment:", sb.Interactions) - c.log.Println("Price Adjustment: ", sb.Prices) - c.log.Println("Storage Adjustment: ", sb.StorageRemaining) - c.log.Println("Uptime Adjustment: ", sb.Uptime) - c.log.Println("Version Adjustment: ", sb.Version) + c.log.Info("marking contract as having no utility because of host score", zap.Stringer("id", contract.ID)) + c.log.Info("Min Score", zap.Stringer("minScoreGFR", minScoreGFR)) + c.log.Info("Score", zap.Stringer("score", sb.Score)) + c.log.Info("Age Adjustment", zap.Float64("age", sb.Age)) + c.log.Info("Collateral Adjustment", zap.Float64("collateral", sb.Collateral)) + c.log.Info("Interaction Adjustment", zap.Float64("interactions", sb.Interactions)) + c.log.Info("Price Adjustment", zap.Float64("prices", sb.Prices)) + c.log.Info("Storage Adjustment", zap.Float64("storageRemaining", sb.StorageRemaining)) + c.log.Info("Uptime Adjustment", zap.Float64("uptime", sb.Uptime)) + c.log.Info("Version Adjustment", zap.Float64("version", sb.Version)) } u.GoodForUpload = false u.GoodForRenew = false @@ -95,19 +98,19 @@ func (c *Contractor) managedCheckHostScore(contract modules.RenterContract, sb m // Contract should not be used for uplodaing if the score is poor. 
if !minScoreGFU.IsZero() && sb.Score.Cmp(minScoreGFU) < 0 { if u.GoodForUpload { - c.log.Printf("Marking contract as not good for upload because of a poor score: %v\n", contract.ID) - c.log.Println("Min Score:", minScoreGFU) - c.log.Println("Score: ", sb.Score) - c.log.Println("Age Adjustment: ", sb.Age) - c.log.Println("Collateral Adjustment: ", sb.Collateral) - c.log.Println("Interaction Adjustment:", sb.Interactions) - c.log.Println("Price Adjustment: ", sb.Prices) - c.log.Println("Storage Adjustment: ", sb.StorageRemaining) - c.log.Println("Uptime Adjustment: ", sb.Uptime) - c.log.Println("Version Adjustment: ", sb.Version) + c.log.Info("marking contract as not good for upload because of a poor score", zap.Stringer("id", contract.ID)) + c.log.Info("Min Score", zap.Stringer("minScoreGFU", minScoreGFU)) + c.log.Info("Score", zap.Stringer("score", sb.Score)) + c.log.Info("Age Adjustment", zap.Float64("age", sb.Age)) + c.log.Info("Collateral Adjustment", zap.Float64("collateral", sb.Collateral)) + c.log.Info("Interaction Adjustment", zap.Float64("interactions", sb.Interactions)) + c.log.Info("Price Adjustment", zap.Float64("prices", sb.Prices)) + c.log.Info("Storage Adjustment", zap.Float64("storageRemaining", sb.StorageRemaining)) + c.log.Info("Uptime Adjustment", zap.Float64("uptime", sb.Uptime)) + c.log.Info("Version Adjustment", zap.Float64("version", sb.Version)) } if !u.GoodForRenew { - c.log.Println("Marking contract as being good for renew", contract.ID) + c.log.Info("marking contract as being good for renew", zap.Stringer("id", contract.ID)) } u.GoodForUpload = false u.GoodForRenew = true @@ -133,12 +136,12 @@ func (c *Contractor) managedCriticalUtilityChecks(fc *contractset.FileContract, renter, err := c.managedFindRenter(contract.ID) c.mu.RUnlock() if err != nil { - c.log.Println("ERROR: renter not found") + c.log.Error("renter not found", zap.Stringer("fcid", contract.ID)) return modules.ContractUtility{}, false } c.mu.RLock() - blockHeight := 
c.blockHeight + blockHeight := c.tip.Height _, renewed := c.renewedTo[contract.ID] c.mu.RUnlock() @@ -200,7 +203,7 @@ func (c *Contractor) managedHostInHostDBCheck(contract modules.RenterContract) ( if !exists || host.Filtered || err != nil { // Log if the utility has changed. if u.GoodForUpload || u.GoodForRenew { - c.log.Printf("INFO: marking contract as having no utility because found in hostDB: %v, or host is Filtered: %v - %v\n", exists, host.Filtered, contract.ID) + c.log.Info("marking contract as having no utility because the host is not found in hostDB or is filtered", zap.Stringer("fcid", contract.ID)) } u.GoodForUpload = false u.GoodForRenew = false @@ -224,7 +227,7 @@ func (c *Contractor) offlineCheck(contract modules.RenterContract, host modules. if isOffline(host) { // Log if the utility has changed. if u.GoodForUpload || u.GoodForRenew { - c.log.Println("INFO: marking contract as having no utility because of host being offline:", contract.ID) + c.log.Info("marking contract as having no utility because of host being offline", zap.Stringer("fcid", contract.ID)) } u.GoodForUpload = false u.GoodForRenew = false @@ -242,10 +245,10 @@ func (c *Contractor) upForRenewalCheck(contract modules.RenterContract, renewWin // renew the contract. 
if blockHeight+renewWindow >= contract.EndHeight { if u.GoodForUpload { - c.log.Println("INFO: marking contract as not good for upload because it is time to renew the contract:", contract.ID) + c.log.Info("marking contract as not good for upload because it is time to renew the contract", zap.Stringer("fcid", contract.ID)) } if !u.GoodForRenew { - c.log.Println("INFO: marking contract as being good for renew:", contract.ID) + c.log.Info("marking contract as being good for renew", zap.Stringer("fcid", contract.ID)) } u.GoodForUpload = false u.GoodForRenew = true @@ -263,19 +266,19 @@ func (c *Contractor) sufficientFundsCheck(contract modules.RenterContract, host // Contract should not be used for uploading if the contract does // not have enough money remaining to perform the upload. - blockBytes := types.NewCurrency64(modules.SectorSize * period) + blockBytes := types.NewCurrency64(rhpv2.SectorSize * period) sectorStoragePrice := host.Settings.StoragePrice.Mul(blockBytes) - sectorUploadBandwidthPrice := host.Settings.UploadBandwidthPrice.Mul64(modules.SectorSize) - sectorDownloadBandwidthPrice := host.Settings.DownloadBandwidthPrice.Mul64(modules.SectorSize) + sectorUploadBandwidthPrice := host.Settings.UploadBandwidthPrice.Mul64(rhpv2.SectorSize) + sectorDownloadBandwidthPrice := host.Settings.DownloadBandwidthPrice.Mul64(rhpv2.SectorSize) sectorBandwidthPrice := sectorUploadBandwidthPrice.Add(sectorDownloadBandwidthPrice) sectorPrice := sectorStoragePrice.Add(sectorBandwidthPrice) percentRemaining, _ := big.NewRat(0, 1).SetFrac(contract.RenterFunds.Big(), contract.TotalCost.Big()).Float64() if contract.RenterFunds.Cmp(sectorPrice.Mul64(3)) < 0 || percentRemaining < minContractFundUploadThreshold { if u.GoodForUpload { - c.log.Printf("INFO: marking contract as not good for upload because of insufficient funds: %v vs. 
%v - %v\n", contract.RenterFunds.Cmp(sectorPrice.Mul64(3)) < 0, percentRemaining, contract.ID) + c.log.Info(fmt.Sprintf("marking contract as not good for upload because of insufficient funds: %v vs. %v", contract.RenterFunds.Cmp(sectorPrice.Mul64(3)) < 0, percentRemaining), zap.Stringer("fcid", contract.ID)) } if !u.GoodForRenew { - c.log.Println("INFO: marking contract as being good for renew:", contract.ID) + c.log.Info("marking contract as being good for renew", zap.Stringer("fcid", contract.ID)) } u.GoodForUpload = false u.GoodForRenew = true @@ -296,10 +299,10 @@ func (c *Contractor) outOfStorageCheck(contract modules.RenterContract, blockHei // Contract should not be used for uploading if the host is out of storage. if blockHeight-u.LastOOSErr <= oosRetryInterval { if u.GoodForUpload { - c.log.Println("INFO: marking contract as not being good for upload due to the host running out of storage:", contract.ID) + c.log.Info("marking contract as not being good for upload due to the host running out of storage", zap.Stringer("fcid", contract.ID)) } if !u.GoodForRenew { - c.log.Println("INFO: marking contract as being good for renew:", contract.ID) + c.log.Info("marking contract as being good for renew", zap.Stringer("fcid", contract.ID)) } u.GoodForUpload = false u.GoodForRenew = true @@ -315,31 +318,31 @@ func (c *Contractor) gougingCheck(contract modules.RenterContract, host modules. // Get the renter. rpk, err := c.staticContracts.RenterByContractID(contract.ID) if err != nil { - c.log.Println("ERROR: no renter found that has this contract:", err) + c.log.Error("no renter found that has this contract", zap.Error(err)) return u, false } c.mu.Lock() renter, exists := c.renters[rpk] c.mu.Unlock() if !exists { - c.log.Println("ERROR: renter not found in the database:", rpk) + c.log.Error("renter not found in the database", zap.Stringer("renter", rpk)) return u, false } // Fetch the price table. 
pt, err := proto.FetchPriceTable(host) if err != nil { - c.log.Printf("WARN: unable to fetch price table from %s: %v", host.Settings.NetAddress, err) + c.log.Warn(fmt.Sprintf("unable to fetch price table from %s", host.Settings.NetAddress), zap.Error(err)) return u, false } // Contract has no utility if the host is gouging. - _, maxFee := c.tpool.FeeEstimation() - err = modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, maxFee) + fee := c.cm.RecommendedFee() + err = modules.CheckGouging(renter.Allowance, blockHeight, &host.Settings, &pt, fee) if err != nil { // Log if the utility has changed. if u.GoodForUpload || u.GoodForRenew { - c.log.Println("INFO: marking contract as having no utility because the host is gouging:", contract.ID) + c.log.Info("marking contract as having no utility because the host is gouging", zap.Stringer("fcid", contract.ID)) } u.GoodForUpload = false u.GoodForRenew = false diff --git a/modules/manager/contractor/persist.go b/modules/manager/contractor/persist.go index f3605be..cd171b8 100644 --- a/modules/manager/contractor/persist.go +++ b/modules/manager/contractor/persist.go @@ -2,6 +2,8 @@ package contractor import ( "time" + + "go.uber.org/zap" ) // saveFrequency determines how often the Contractor will be saved. 
@@ -11,31 +13,31 @@ const saveFrequency = 2 * time.Minute func (c *Contractor) load() error { err := c.initDB() if err != nil { - c.log.Println("ERROR: couldn't initialize database:", err) + c.log.Error("couldn't initialize database", zap.Error(err)) return err } err = c.loadState() if err != nil { - c.log.Println("ERROR: couldn't load sync state:", err) + c.log.Error("couldn't load sync state", zap.Error(err)) return err } err = c.loadDoubleSpent() if err != nil { - c.log.Println("ERROR: couldn't load double-spent contracts:", err) + c.log.Error("couldn't load double-spent contracts", zap.Error(err)) return err } err = c.loadRenewHistory() if err != nil { - c.log.Println("ERROR: couldn't load renewal history:", err) + c.log.Error("couldn't load renewal history", zap.Error(err)) return err } err = c.loadRenters() if err != nil { - c.log.Println("ERROR: couldn't load renters:", err) + c.log.Error("couldn't load renters", zap.Error(err)) return err } @@ -43,7 +45,7 @@ func (c *Contractor) load() error { if err != nil { return err } - c.staticWatchdog.blockHeight = c.blockHeight + c.staticWatchdog.blockHeight = c.tip.Height return nil } @@ -52,13 +54,13 @@ func (c *Contractor) load() error { func (c *Contractor) save() error { err := c.updateState() if err != nil { - c.log.Println("ERROR: couldn't save sync state:", err) + c.log.Error("couldn't save sync state", zap.Error(err)) return err } err = c.updateDoubleSpent() if err != nil { - c.log.Println("ERROR: couldn't save double-spent contracts:", err) + c.log.Error("couldn't save double-spent contracts", zap.Error(err)) return err } @@ -87,7 +89,7 @@ func (c *Contractor) threadedSaveLoop() { err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("ERROR: difficulties saving the Contractor:", err) + c.log.Error("difficulties saving the Contractor", zap.Error(err)) } } } diff --git a/modules/manager/contractor/renew.go b/modules/manager/contractor/renew.go index 2a0aff0..5fb3146 100644 --- 
a/modules/manager/contractor/renew.go +++ b/modules/manager/contractor/renew.go @@ -11,6 +11,7 @@ import ( "github.com/mike76-dev/sia-satellite/internal/build" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/proto" + "go.uber.org/zap" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" @@ -32,7 +33,7 @@ type ( // transaction set. func (c *Contractor) prepareContractRenewal(host modules.HostDBEntry, oldRev types.FileContractRevision, contractFunding, hostCollateral types.Currency, endHeight uint64, address types.Address) ([]types.Transaction, types.Transaction, []types.Transaction, types.Currency, types.Currency, types.Currency, []types.Hash256, error) { c.mu.RLock() - blockHeight := c.blockHeight + blockHeight := c.tip.Height c.mu.RUnlock() // Create the final revision from the provided revision. @@ -43,12 +44,16 @@ func (c *Contractor) prepareContractRenewal(host modules.HostDBEntry, oldRev typ finalRevision.RevisionNumber = math.MaxUint64 // Prepare the new contract. - fc, basePrice := rhpv3.PrepareContractRenewal(oldRev, host.Settings.Address, address, contractFunding, hostCollateral, host.PriceTable, endHeight) + expectedStorage := fundsToExpectedStorage(contractFunding, endHeight-blockHeight, host.Settings) + fc, basePrice, err := rhpv3.PrepareContractRenewal(oldRev, host.Settings.Address, address, contractFunding, hostCollateral, host.PriceTable, expectedStorage, endHeight) + if err != nil { + return nil, types.Transaction{}, nil, types.ZeroCurrency, types.ZeroCurrency, types.ZeroCurrency, nil, modules.AddContext(err, "unable to prepare contract renewal") + } // RHP3 contains both the contract and final revision. So we double the // estimation. 
- _, txnFee := c.tpool.FeeEstimation() - minerFee := txnFee.Mul64(2 * modules.EstimatedFileContractTransactionSetSize) + txnFee := c.cm.RecommendedFee() + minerFee := txnFee.Mul64(2 * 2048) // Create the transaction containing both the final revision and new // contract. @@ -64,16 +69,18 @@ func (c *Contractor) prepareContractRenewal(host modules.HostDBEntry, oldRev typ totalCost := cost.Add(minerFee).Add(basePrice).Add(tax) // Fund the transaction. - parentTxn, toSign, err := c.wallet.FundTransaction(&txn, totalCost) + parents, toSign, err := c.wallet.Fund(&txn, totalCost) if err != nil { - c.wallet.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) return nil, types.Transaction{}, nil, types.ZeroCurrency, types.ZeroCurrency, types.ZeroCurrency, nil, modules.AddContext(err, "unable to fund transaction") } // Make a copy of the transactions to be used to by the watchdog // to double spend these inputs in case the contract never appears on chain. sweepTxn := modules.CopyTransaction(txn) - sweepParents := []types.Transaction{modules.CopyTransaction(parentTxn)} + var sweepParents []types.Transaction + for _, parent := range parents { + sweepParents = append(sweepParents, modules.CopyTransaction(parent)) + } // Add an output that sends all funds back to the Satellite address. output := types.SiacoinOutput{ @@ -82,14 +89,14 @@ func (c *Contractor) prepareContractRenewal(host modules.HostDBEntry, oldRev typ } sweepTxn.SiacoinOutputs = append(sweepTxn.SiacoinOutputs, output) - return append([]types.Transaction{parentTxn}, txn), sweepTxn, sweepParents, totalCost, minerFee, tax, toSign, nil + return append(parents, txn), sweepTxn, sweepParents, totalCost, minerFee, tax, toSign, nil } // managedRenewContract will try to renew a contract, returning the // amount of money that was put into the contract for renewal. 
func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rpk types.PublicKey, rsk types.PrivateKey, contractFunding types.Currency, endHeight uint64) (fundsSpent types.Currency, newContract modules.RenterContract, err error) { c.mu.RLock() - blockHeight := c.blockHeight + blockHeight := c.tip.Height // Check if we know this renter. renter, exists := c.renters[rpk] @@ -185,7 +192,7 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp host.PriceTable = pt // Check if the host is gouging. - _, txnFee := c.tpool.FeeEstimation() + txnFee := c.cm.RecommendedFee() if err := modules.CheckGouging(allowance, blockHeight, &hostSettings, &pt, txnFee); err != nil { hostFault = true return modules.AddContext(err, "host is gouging") @@ -228,25 +235,22 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp } // Renew the contract. - rev, txnSet, err = proto.RPCRenewContract(ctx, t, esk, oldRev, renterTxnSet, toSign, c.wallet) + rev, txnSet, err = proto.RPCRenewContract(ctx, t, esk, oldRev, renterTxnSet, toSign, c.wallet, c.cm.TipState()) if err != nil { if !modules.ContainsError(err, errors.New("failed to sign transaction")) { hostFault = true } - c.wallet.ReleaseInputs(renterTxnSet) + c.wallet.Release(renterTxnSet) return modules.AddContext(err, "couldn't renew contract") } // Submit to blockchain. - err = c.tpool.AcceptTransactionSet(txnSet) - if modules.ContainsError(err, errDuplicateTransactionSet) { - // As long as it made it into the transaction pool, we're good. 
- err = nil - } + _, err = c.cm.AddPoolTransactions(txnSet) if err != nil { - c.wallet.ReleaseInputs(txnSet) - return modules.AddContext(err, "couldn't broadcast transaction set") + c.wallet.Release(txnSet) + return modules.AddContext(err, "invalid transaction set") } + c.s.BroadcastTransactionSet(txnSet) return nil }) @@ -300,7 +304,7 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp // Update the hostdb to include the new contract. err = c.hdb.UpdateContracts(c.staticContracts.ViewAll()) if err != nil { - c.log.Println("ERROR: unable to update hostdb contracts:", err) + c.log.Error("unable to update hostdb contracts", zap.Error(err)) } } @@ -314,7 +318,7 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp totalFailures++ c.numFailedRenews[id] = totalFailures c.mu.Unlock() - c.log.Println("INFO: remote host determined to be at fault, tallying up failed renews", totalFailures, id) + c.log.Info("remote host determined to be at fault, tallying up failed renews", zap.Uint64("failures", totalFailures), zap.Stringer("fcid", id)) } // Check if contract has to be replaced. 
@@ -330,11 +334,10 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp oldUtility.Locked = true err := c.managedUpdateContractUtility(oldFC, oldUtility) if err != nil { - c.log.Println("WARN: failed to mark contract as !goodForRenew:", err) + c.log.Warn("failed to mark contract as !goodForRenew", zap.Error(err)) } - c.log.Printf("WARN: consistently failed to renew %v, marked as bad and locked: %v\n", - hostPubKey, errRenew) + c.log.Warn(fmt.Sprintf("consistently failed to renew %v, marked as bad and locked", hostPubKey), zap.Error(errRenew)) c.staticContracts.Return(oldFC) return types.ZeroCurrency, newContract, modules.AddContext(errRenew, "contract marked as bad for too many consecutive failed renew attempts") @@ -342,12 +345,12 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp // Seems like it doesn't have to be replaced yet. Log the // failure and number of renews that have failed so far. - c.log.Printf("WARN: failed to renew contract %v [%v]: '%v', current height: %v, proposed end height: %v", hostPubKey, numRenews, errRenew, blockHeight, endHeight) + c.log.Warn(fmt.Sprintf("failed to renew contract %v [%v]: '%v', current height: %v, proposed end height: %v", hostPubKey, numRenews, errRenew, blockHeight, endHeight)) c.staticContracts.Return(oldFC) return types.ZeroCurrency, newContract, modules.AddContext(errRenew, "contract renewal with host was unsuccessful") } - c.log.Printf("INFO: renewed contract %v\n", id) + c.log.Info("renewed contract", zap.Stringer("id", id)) // Update the utility values for the new contract, and for the old // contract. 
@@ -356,7 +359,7 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp GoodForRenew: true, } if err := c.managedAcquireAndUpdateContractUtility(newContract.ID, newUtility); err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) c.staticContracts.Return(oldFC) return fundsSpent, newContract, nil } @@ -365,7 +368,7 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp oldUtility.GoodForUpload = false oldUtility.Locked = true if err := c.managedUpdateContractUtility(oldFC, oldUtility); err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) c.staticContracts.Return(oldFC) return fundsSpent, newContract, nil } @@ -385,7 +388,7 @@ func (c *Contractor) managedRenewContract(oldContract modules.RenterContract, rp // Update the database. err = c.updateRenewedContract(id, newContract.ID) if err != nil { - c.log.Println("ERROR: failed to update contracts in the database.") + c.log.Error("failed to update contracts in the database", zap.Error(err)) } // Delete the old contract. @@ -408,7 +411,7 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c // Check if we know this renter. c.mu.RLock() renter, exists := c.renters[rpk] - blockHeight := c.blockHeight + blockHeight := c.tip.Height c.mu.RUnlock() if !exists { return nil, ErrRenterNotFound @@ -418,16 +421,6 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c var numRenewFails int var renewErr error - // Register or unregister and alerts related to contract renewal. 
- var registerLowFundsAlert bool - defer func() { - if registerLowFundsAlert { - c.staticAlerter.RegisterAlert(modules.AlertIDRenterAllowanceLowFunds, AlertMSGAllowanceLowFunds, AlertCauseInsufficientAllowanceFunds, modules.SeverityWarning) - } else { - c.staticAlerter.UnregisterAlert(modules.AlertIDRenterAllowanceLowFunds) - } - }() - var renewSet []fileContractRenewal var fundsRemaining types.Currency @@ -437,11 +430,11 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c rc, ok := c.staticContracts.View(id) r, err := c.managedFindRenter(id) if err != nil { - c.log.Println("WARN: contract ID submitted that has no known renter associated with it:", id) + c.log.Warn("contract ID submitted that has no known renter associated with it", zap.Stringer("id", id)) continue } if !ok || r.PublicKey != rpk { - c.log.Println("WARN: contract ID submitted that doesn't belong to this renter:", id, renter.PublicKey) + c.log.Warn("contract ID submitted that doesn't belong to this renter", zap.Stringer("id", id), zap.Stringer("renter", renter.PublicKey)) continue } @@ -454,7 +447,7 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c spending, err := c.PeriodSpending(renter.PublicKey) if err != nil { // This should only error if the contractor is shutting down. - c.log.Println("WARN: error getting period spending:", err) + c.log.Warn("error getting period spending", zap.Error(err)) return nil, err } @@ -469,24 +462,24 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c // settings. host, _, err := c.hdb.Host(rc.HostPublicKey) if err != nil { - c.log.Println("WARN: error getting host", err) + c.log.Warn("error getting host", zap.Error(err)) continue } if host.Filtered { - c.log.Println("INFO: contract skipped because it is filtered") + c.log.Info("contract skipped because it is filtered") continue } // Skip hosts that can't use the current renter-host protocol. 
if build.VersionCmp(host.Settings.Version, minimumSupportedRenterHostProtocolVersion) < 0 { - c.log.Println("INFO: contract skipped because host is using an outdated version", host.Settings.Version) + c.log.Info("contract skipped because host is using an outdated version", zap.String("version", host.Settings.Version)) continue } // Skip contracts which do not exist or are otherwise unworthy for // renewal. if renter.Settings.AutoRenewContracts && (!ok || !cu.GoodForRenew) { - c.log.Println("INFO: contract skipped because it is not good for renew (utility.GoodForRenew, exists)", cu.GoodForRenew, ok) + c.log.Info("contract skipped because it is not good for renew", zap.Bool("GFR", cu.GoodForRenew), zap.Bool("exists", ok)) continue } @@ -495,7 +488,7 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c // (which is now ending). renewAmount, err := c.managedEstimateRenewFundingRequirements(rc, blockHeight, renter.Allowance) if err != nil { - c.log.Println("WARN: contract skipped because there was an error estimating renew funding requirements", renewAmount, err) + c.log.Warn("contract skipped because there was an error estimating renew funding requirements", zap.Stringer("amount", renewAmount), zap.Error(err)) continue } renewSet = append(renewSet, fileContractRenewal{ @@ -504,10 +497,10 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c renterPubKey: rpk, secretKey: rsk, }) - c.log.Println("INFO: contract has been added to the renew set") + c.log.Info("contract has been added to the renew set") } if len(renewSet) != 0 { - c.log.Printf("INFO: renewing %v contracts\n", len(renewSet)) + c.log.Info("renewing contracts", zap.Int("number", len(renewSet))) } // Go through the contracts we've assembled for renewal. @@ -515,21 +508,14 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c // Return here if an interrupt or kill signal has been sent. 
select { case <-c.tg.StopChan(): - c.log.Println("INFO: returning because the manager was stopped") + c.log.Info("returning because the manager was stopped") return nil, errors.New("the manager was stopped") default: } - unlocked, err := c.wallet.Unlocked() - if !unlocked || err != nil { - c.log.Println("ERROR: contractor is attempting to renew contracts that are about to expire, however the wallet is locked") - return nil, err - } - // Skip this renewal if we don't have enough funds remaining. if renewal.amount.Cmp(fundsRemaining) > 0 { - c.log.Println("WARN: skipping renewal because there are not enough funds remaining in the allowance", renewal.contract.ID, renewal.amount, fundsRemaining) - registerLowFundsAlert = true + c.log.Warn("skipping renewal because there are not enough funds remaining in the allowance", zap.Stringer("fcid", renewal.contract.ID), zap.Stringer("amount", renewal.amount), zap.Stringer("remaining", fundsRemaining)) continue } @@ -539,9 +525,9 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c fundsSpent, newContract, err := c.managedRenewContract(renewal.contract, renewal.renterPubKey, renewal.secretKey, renewal.amount, renter.ContractEndHeight()) if modules.ContainsError(err, errContractNotGFR) { // Do not add a renewal error. 
- c.log.Println("INFO: contract skipped because it is not good for renew", renewal.contract.ID) + c.log.Info("contract skipped because it is not good for renew", zap.Stringer("fcid", renewal.contract.ID)) } else if err != nil { - c.log.Println("ERROR: error renewing contract", renewal.contract.ID, err) + c.log.Error("error renewing contract", zap.Stringer("id", renewal.contract.ID), zap.Error(err)) renewErr = modules.ComposeErrors(renewErr, err) numRenewFails++ } @@ -554,13 +540,13 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c amount := funds / hastings err = c.m.LockSiacoins(renter.Email, amount) if err != nil { - c.log.Println("ERROR: couldn't lock funds") + c.log.Error("couldn't lock funds", zap.Error(err)) } // Increment the number of renewals in the database. err = c.m.IncrementStats(renter.Email, true) if err != nil { - c.log.Println("ERROR: couldn't update stats") + c.log.Error("couldn't update stats", zap.Error(err)) } // Add this contract to the contractor and save. @@ -570,7 +556,7 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c GoodForRenew: true, }) if err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) continue } @@ -578,7 +564,7 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("ERROR: unable to save the contractor:", err) + c.log.Error("unable to save the contractor", zap.Error(err)) } } } @@ -604,7 +590,7 @@ func (c *Contractor) RenewContracts(rpk types.PublicKey, rsk types.PrivateKey, c // Renter-Satellite protocol. 
func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk types.PublicKey, oldContract modules.RenterContract, contractFunding types.Currency, endHeight uint64) (fundsSpent types.Currency, newContract modules.RenterContract, err error) { c.mu.RLock() - blockHeight := c.blockHeight + blockHeight := c.tip.Height // Check if we know this renter. renter, exists := c.renters[rpk] @@ -700,7 +686,7 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty host.PriceTable = pt // Check if the host is gouging. - _, txnFee := c.tpool.FeeEstimation() + txnFee := c.cm.RecommendedFee() if err := modules.CheckGouging(allowance, blockHeight, &hostSettings, &pt, txnFee); err != nil { hostFault = true return modules.AddContext(err, "host is gouging") @@ -740,25 +726,22 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty } // Renew the contract. - rev, txnSet, err = proto.RPCTrustlessRenewContract(ctx, s, t, renterTxnSet, toSign, c.wallet) + rev, txnSet, err = proto.RPCTrustlessRenewContract(ctx, s, t, renterTxnSet, toSign, c.wallet, c.cm.TipState()) if err != nil { if !modules.ContainsError(err, errors.New("failed to sign transaction")) && !modules.ContainsError(err, errors.New("invalid renter signature")) { hostFault = true } - c.wallet.ReleaseInputs(renterTxnSet) + c.wallet.Release(renterTxnSet) return modules.AddContext(err, "couldn't renew contract") } // Submit to blockchain. - err = c.tpool.AcceptTransactionSet(txnSet) - if modules.ContainsError(err, errDuplicateTransactionSet) { - // As long as it made it into the transaction pool, we're good. 
- err = nil - } + _, err = c.cm.AddPoolTransactions(txnSet) if err != nil { - c.wallet.ReleaseInputs(txnSet) - return modules.AddContext(err, "couldn't broadcast transaction set") + c.wallet.Release(txnSet) + return modules.AddContext(err, "invalid transaction set") } + c.s.BroadcastTransactionSet(txnSet) return nil }) @@ -812,7 +795,7 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty // Update the hostdb to include the new contract. err = c.hdb.UpdateContracts(c.staticContracts.ViewAll()) if err != nil { - c.log.Println("ERROR: unable to update hostdb contracts:", err) + c.log.Error("unable to update hostdb contracts", zap.Error(err)) } } @@ -825,7 +808,7 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty c.numFailedRenews[id]++ totalFailures := c.numFailedRenews[id] c.mu.Unlock() - c.log.Println("INFO: remote host determined to be at fault, tallying up failed renews", totalFailures, id) + c.log.Info("remote host determined to be at fault, tallying up failed renews", zap.Uint64("failures", totalFailures), zap.Stringer("fcid", id)) } // Check if contract has to be replaced. 
@@ -841,11 +824,10 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty oldUtility.Locked = true err := c.managedUpdateContractUtility(oldFC, oldUtility) if err != nil { - c.log.Println("WARN: failed to mark contract as !goodForRenew:", err) + c.log.Warn("failed to mark contract as !goodForRenew", zap.Error(err)) } - c.log.Printf("WARN: consistently failed to renew %v, marked as bad and locked: %v\n", - hostPubKey, errRenew) + c.log.Warn(fmt.Sprintf("consistently failed to renew %v, marked as bad and locked", hostPubKey), zap.Error(errRenew)) c.staticContracts.Return(oldFC) return types.ZeroCurrency, newContract, modules.AddContext(errRenew, "contract marked as bad for too many consecutive failed renew attempts") @@ -853,12 +835,12 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty // Seems like it doesn't have to be replaced yet. Log the // failure and number of renews that have failed so far. - c.log.Printf("WARN: failed to renew contract %v [%v]: '%v', current height: %v, proposed end height: %v", hostPubKey, numRenews, errRenew, blockHeight, endHeight) + c.log.Warn(fmt.Sprintf("failed to renew contract %v [%v]: '%v', current height: %v, proposed end height: %v", hostPubKey, numRenews, errRenew, blockHeight, endHeight)) c.staticContracts.Return(oldFC) return types.ZeroCurrency, newContract, modules.AddContext(errRenew, "contract renewal with host was unsuccessful") } - c.log.Printf("INFO: renewed contract %v\n", id) + c.log.Info("renewed contract", zap.Stringer("id", id)) // Update the utility values for the new contract, and for the old // contract. 
@@ -867,7 +849,7 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty GoodForRenew: true, } if err := c.managedAcquireAndUpdateContractUtility(newContract.ID, newUtility); err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) c.staticContracts.Return(oldFC) return fundsSpent, newContract, nil } @@ -876,7 +858,7 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty oldUtility.GoodForUpload = false oldUtility.Locked = true if err := c.managedUpdateContractUtility(oldFC, oldUtility); err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) c.staticContracts.Return(oldFC) return fundsSpent, newContract, nil } @@ -896,7 +878,7 @@ func (c *Contractor) managedTrustlessRenewContract(s *modules.RPCSession, rpk ty // Update the database. err = c.updateRenewedContract(id, newContract.ID) if err != nil { - c.log.Println("ERROR: failed to update contracts in the database.") + c.log.Error("failed to update contracts in the database", zap.Error(err)) } // Delete the old contract. @@ -927,25 +909,18 @@ func (c *Contractor) RenewContract(s *modules.RPCSession, rpk types.PublicKey, c // Check if the contract belongs to the renter. 
r, err := c.managedFindRenter(contract.ID) if err != nil { - c.log.Println("WARN: contract ID submitted that has no known renter associated with it:", contract.ID) + c.log.Warn("contract ID submitted that has no known renter associated with it", zap.Stringer("id", contract.ID)) return modules.RenterContract{}, err } if r.PublicKey != rpk { - c.log.Println("WARN: contract ID submitted that doesn't belong to this renter:", contract.ID, renter.PublicKey) + c.log.Warn("contract ID submitted that doesn't belong to this renter", zap.Stringer("id", contract.ID), zap.Stringer("renter", renter.PublicKey)) return modules.RenterContract{}, errors.New("contract doesn't belong to this renter") } - // Check if the wallet is unlocked. - unlocked, err := c.wallet.Unlocked() - if !unlocked || err != nil { - c.log.Println("ERROR: contractor is attempting to renew a contract but the wallet is locked") - return modules.RenterContract{}, err - } - // Renew the contract. fundsSpent, newContract, err := c.managedTrustlessRenewContract(s, rpk, contract, funding, endHeight) if err != nil { - c.log.Printf("WARN: attempted to renew a contract with %v, but renewal failed: %v\n", contract.HostPublicKey, err) + c.log.Warn(fmt.Sprintf("attempted to renew a contract with %v, but renewal failed", contract.HostPublicKey), zap.Error(err)) return modules.RenterContract{}, err } @@ -955,13 +930,13 @@ func (c *Contractor) RenewContract(s *modules.RPCSession, rpk types.PublicKey, c amount := funds / hastings err = c.m.LockSiacoins(renter.Email, amount) if err != nil { - c.log.Println("ERROR: couldn't lock funds") + c.log.Error("couldn't lock funds", zap.Error(err)) } // Increment the number of renewals in the database. err = c.m.IncrementStats(renter.Email, true) if err != nil { - c.log.Println("ERROR: couldn't update stats") + c.log.Error("couldn't update stats", zap.Error(err)) } // Add this contract to the contractor and save. 
@@ -970,14 +945,14 @@ func (c *Contractor) RenewContract(s *modules.RPCSession, rpk types.PublicKey, c GoodForRenew: true, }) if err != nil { - c.log.Println("ERROR: failed to update the contract utilities", err) + c.log.Error("failed to update the contract utilities", zap.Error(err)) return modules.RenterContract{}, err } c.mu.Lock() err = c.save() c.mu.Unlock() if err != nil { - c.log.Println("ERROR: unable to save the contractor:", err) + c.log.Error("unable to save the contractor", zap.Error(err)) } return newContract, nil diff --git a/modules/manager/contractor/repair.go b/modules/manager/contractor/repair.go index 08a77f0..fdf0d29 100644 --- a/modules/manager/contractor/repair.go +++ b/modules/manager/contractor/repair.go @@ -10,6 +10,7 @@ import ( "github.com/mike76-dev/sia-satellite/internal/object" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" ) @@ -67,7 +68,7 @@ func (m *migrator) tryPerformMigrations() { err := m.contractor.tg.Add() if err != nil { - m.contractor.log.Println("ERROR: couldn't add thread:", err) + m.contractor.log.Error("couldn't add thread", zap.Error(err)) m.migrating = false m.mu.Unlock() return @@ -86,7 +87,7 @@ func (m *migrator) tryPerformMigrations() { // performMigrations performs the slab migrations. func (m *migrator) performMigrations(ctx context.Context) { - m.contractor.log.Println("INFO: performing migrations") + m.contractor.log.Info("performing migrations") // Prepare a channel to push work. type job struct { @@ -143,12 +144,12 @@ func (m *migrator) performMigrations(ctx context.Context) { // Deduct from the account balance. 
renter, err := m.contractor.m.GetRenter(rpk) if err != nil { - m.contractor.log.Println("ERROR: couldn't get the renter:", err) + m.contractor.log.Error("couldn't get the renter", zap.Error(err)) continue } ub, err := m.contractor.m.GetBalance(renter.Email) if err != nil { - m.contractor.log.Println("ERROR: couldn't get renter balance:", err) + m.contractor.log.Error("couldn't get renter balance", zap.Error(err)) continue } var fee float64 @@ -161,7 +162,7 @@ func (m *migrator) performMigrations(ctx context.Context) { ub.Balance -= cost err = m.contractor.m.UpdateBalance(renter.Email, ub) if err != nil { - m.contractor.log.Println("ERROR: couldn't update renter balance:", err) + m.contractor.log.Error("couldn't update renter balance", zap.Error(err)) continue } @@ -169,7 +170,7 @@ func (m *migrator) performMigrations(ctx context.Context) { year, month, _ := time.Now().Date() us, err := m.contractor.m.GetSpendings(renter.Email, int(month), year) if err != nil { - m.contractor.log.Println("ERROR: couldn't retrieve renter spendings:", err) + m.contractor.log.Error("couldn't retrieve renter spendings", zap.Error(err)) continue } us.Overhead += cost @@ -177,7 +178,7 @@ func (m *migrator) performMigrations(ctx context.Context) { err = m.contractor.m.UpdateSpendings(renter.Email, us, int(month), year) if err != nil { - m.contractor.log.Println("ERROR: couldn't update renter spendings:", err) + m.contractor.log.Error("couldn't update renter spendings", zap.Error(err)) } } }() @@ -191,16 +192,16 @@ func (m *migrator) performMigrations(ctx context.Context) { for j := range jobs { slab, offset, length, err := m.contractor.getSlab(j.Key) if err != nil { - m.contractor.log.Printf("ERROR: failed to fetch slab for migration %d/%d, health: %v, err: %v\n", j.slabIdx+1, j.batchSize, j.health, err) + m.contractor.log.Error(fmt.Sprintf("failed to fetch slab for migration %d/%d", j.slabIdx+1, j.batchSize), zap.Float64("health", j.health), zap.Error(err)) continue } // Migrate the slab. 
err = m.contractor.migrateSlab(ctx, j.renterKey, &slab) if err != nil { - m.contractor.log.Printf("ERROR: failed to migrate slab %d/%d, health: %v, err: %v\n", j.slabIdx+1, j.batchSize, j.health, err) + m.contractor.log.Error(fmt.Sprintf("failed to migrate slab %d/%d", j.slabIdx+1, j.batchSize), zap.Float64("health", j.health), zap.Error(err)) } else { - m.contractor.log.Printf("INFO: successfully migrated slab %d/%d\n", j.slabIdx+1, j.batchSize) + m.contractor.log.Info(fmt.Sprintf("successfully migrated slab %d/%d", j.slabIdx+1, j.batchSize)) // Update the slab in the database. key, _ := convertEncryptionKey(slab.Key) @@ -218,7 +219,7 @@ func (m *migrator) performMigrations(ctx context.Context) { } err = m.contractor.updateSlab(j.renterKey, s, false) if err != nil { - m.contractor.log.Printf("ERROR: failed to update slab %d/%d, err: %v\n", j.slabIdx+1, j.batchSize, err) + m.contractor.log.Error(fmt.Sprintf("failed to update slab %d/%d", j.slabIdx+1, j.batchSize), zap.Error(err)) } } @@ -253,13 +254,13 @@ OUTER: for rpk, num := range numSlabs { renter, err := m.contractor.m.GetRenter(rpk) if err != nil { - m.contractor.log.Println("ERROR: couldn't get the renter:", err) + m.contractor.log.Error("couldn't get the renter", zap.Error(err)) numSlabs[rpk] = 0 continue } ub, err := m.contractor.m.GetBalance(renter.Email) if err != nil { - m.contractor.log.Println("ERROR: couldn't get renter balance:", err) + m.contractor.log.Error("couldn't get renter balance", zap.Error(err)) numSlabs[rpk] = 0 continue } @@ -271,12 +272,12 @@ OUTER: } cost := float64(num) * fee if !ub.Subscribed && ub.Balance < cost { - m.contractor.log.Println("WARN: skipping slab migrations due to an insufficient account balance:", renter.Email) + m.contractor.log.Warn("skipping slab migrations due to an insufficient account balance", zap.String("renter", renter.Email)) numSlabs[rpk] = 0 continue } if ub.OnHold > 0 && ub.OnHold < uint64(time.Now().Unix()-int64(modules.OnHoldThreshold.Seconds())) { - 
m.contractor.log.Println("WARN: skipping slab migrations due to the account being on hold:", renter.Email) + m.contractor.log.Warn("skipping slab migrations due to the account being on hold", zap.String("renter", renter.Email)) numSlabs[rpk] = 0 } } @@ -328,7 +329,7 @@ OUTER: case <-m.contractor.tg.StopChan(): return case <-m.maintenanceFinished: - m.contractor.log.Println("INFO: migrations interrupted - updating slabs for migration") + m.contractor.log.Info("migrations interrupted - updating slabs for migration") continue OUTER case jobs <- job{slab, i, len(toMigrate)}: } @@ -342,7 +343,7 @@ func (c *Contractor) managedCheckFileHealth() (toRepair []slabInfo, err error) { // Load slabs. slabs, err := c.getSlabs() if err != nil { - c.log.Println("ERROR: couldn't load slabs:", err) + c.log.Error("couldn't load slabs", zap.Error(err)) return } @@ -355,14 +356,14 @@ func (c *Contractor) managedCheckFileHealth() (toRepair []slabInfo, err error) { // Sanity check. if slab.MinShards > uint8(len(slab.Shards)) { - c.log.Printf("ERROR: retrieved less shards than MinShards (%v/%v)\n", len(slab.Shards), slab.MinShards) + c.log.Error(fmt.Sprintf("retrieved less shards than MinShards (%v/%v)", len(slab.Shards), slab.MinShards)) continue } // Check if the renter has opted in for repairs. renter, err := c.GetRenter(slab.renterKey) if err != nil { - c.log.Println("ERROR: couldn't fetch the renter:", err) + c.log.Error("couldn't fetch the renter", zap.Error(err)) continue } if !renter.Settings.AutoRepairFiles { @@ -375,11 +376,11 @@ func (c *Contractor) managedCheckFileHealth() (toRepair []slabInfo, err error) { // Fetch the host. 
host, exists, err := c.hdb.Host(shard.Host) if err != nil { - c.log.Println("ERROR: couldn't fetch host:", err) + c.log.Error("couldn't fetch host", zap.Error(err)) continue } if !exists { - c.log.Printf("WARN: host %v not found in the database\n", shard.Host) + c.log.Warn("host not found in the database", zap.Stringer("host", shard.Host)) continue } if !host.ScanHistory[len(host.ScanHistory)-1].Success { @@ -421,7 +422,7 @@ func (c *Contractor) managedCheckFileHealth() (toRepair []slabInfo, err error) { func (c *Contractor) migrateSlab(ctx context.Context, rpk types.PublicKey, s *object.Slab) error { // Get the current height. c.mu.RLock() - bh := c.blockHeight + bh := c.tip.Height c.mu.RUnlock() // Make two slices. One shall contain all renter contracts, diff --git a/modules/manager/contractor/update.go b/modules/manager/contractor/update.go index d18d715..35eaa58 100644 --- a/modules/manager/contractor/update.go +++ b/modules/manager/contractor/update.go @@ -2,8 +2,10 @@ package contractor import ( "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) // managedArchiveContracts will figure out which contracts are no longer needed @@ -11,7 +13,7 @@ import ( func (c *Contractor) managedArchiveContracts() { // Determine the current block height. c.mu.RLock() - currentHeight := c.blockHeight + currentHeight := c.tip.Height c.mu.RUnlock() // Loop through the current set of contracts and migrate any expired ones to @@ -27,7 +29,7 @@ func (c *Contractor) managedArchiveContracts() { id := contract.ID c.staticContracts.RetireContract(id) expired = append(expired, id) - c.log.Println("INFO: archived expired contract", id) + c.log.Info("archived expired contract", zap.Stringer("id", id)) } } @@ -50,68 +52,70 @@ func (c *Contractor) managedArchiveContracts() { } } -// ProcessConsensusChange will be called by the consensus set every time there -// is a change in the blockchain. 
Updates will always be called in order. -func (c *Contractor) ProcessConsensusChange(cc modules.ConsensusChange) { - c.mu.Lock() - - c.blockHeight = cc.InitialHeight() - for _, block := range cc.AppliedBlocks { - if block.ID() != modules.GenesisID { - c.blockHeight++ - } +// UpdateChainState applies or reverts the updates from ChainManager. +func (c *Contractor) UpdateChainState(reverted []chain.RevertUpdate, applied []chain.ApplyUpdate) error { + for _, cru := range reverted { + c.staticWatchdog.callScanRevertUpdate(cru) } - c.staticWatchdog.callScanConsensusChange(cc) - // If the allowance is set and we have entered the next period, update - // CurrentPeriod. - renters := c.renters - for key, renter := range renters { - if renter.Allowance.Active() && c.blockHeight >= renter.CurrentPeriod+renter.Allowance.Period { - renter.CurrentPeriod += renter.Allowance.Period - c.renters[key] = renter - err := c.UpdateRenter(renter) - if err != nil { - c.log.Println("ERROR: unable to update renter:", err) + for _, cau := range applied { + c.mu.Lock() + c.tip = cau.State.Index + c.staticWatchdog.callScanApplyUpdate(cau) + + // If the allowance is set and we have entered the next period, update + // CurrentPeriod. + renters := c.renters + for key, renter := range renters { + if renter.Allowance.Active() && c.tip.Height >= renter.CurrentPeriod+renter.Allowance.Period { + renter.CurrentPeriod += renter.Allowance.Period + c.renters[key] = renter + err := c.UpdateRenter(renter) + if err != nil { + c.log.Error("unable to update renter", zap.Error(err)) + } } } - } - // Check if c.synced already signals that the contractor is synced. - synced := false - select { - case <-c.synced: - synced = true - default: - } - // If we weren't synced but are now, we close the channel. If we were - // synced but aren't anymore, we need a new channel. 
- if !synced && cc.Synced { - close(c.synced) - } else if synced && !cc.Synced { - c.synced = make(chan struct{}) - } + // Check if c.synced already signals that the contractor is synced. + synced := false + select { + case <-c.synced: + synced = true + default: + } + + // If we weren't synced but are now, we close the channel. If we were + // synced but aren't anymore, we need a new channel. + if !synced && c.s.Synced() && c.tip == c.cm.Tip() { + close(c.synced) + } else if synced && (!c.s.Synced() || c.tip != c.cm.Tip()) { + c.synced = make(chan struct{}) + } - // Let the watchdog take any necessary actions and update its state. We do - // this before persisting the contractor so that the watchdog is up-to-date on - // reboot. Otherwise it is possible that e.g. that the watchdog thinks a - // storage proof was missed and marks down a host for that. Other watchdog - // actions are innocuous. - if cc.Synced { - c.staticWatchdog.callCheckContracts() + // Let the watchdog take any necessary actions and update its state. We do + // this before persisting the contractor so that the watchdog is up-to-date on + // reboot. Otherwise it is possible that e.g. that the watchdog thinks a + // storage proof was missed and marks down a host for that. Other watchdog + // actions are innocuous. + if c.s.Synced() && c.tip == c.cm.Tip() { + c.staticWatchdog.callCheckContracts() + } } - c.lastChange = cc.ID - err := c.updateState() - if err != nil { - c.log.Println("ERROR: unable to save while processing a consensus change:", err) + if err := c.updateState(); err != nil { + c.log.Error("unable to save while processing a consensus change", zap.Error(err)) + c.mu.Unlock() + return err } c.mu.Unlock() // Perform contract maintenance if our blockchain is synced. Use a separate // goroutine so that the rest of the contractor is not blocked during // maintenance. 
- if cc.Synced { + if c.s.Synced() && c.tip == c.cm.Tip() { go c.threadedContractMaintenance() } + + return nil } diff --git a/modules/manager/contractor/upload.go b/modules/manager/contractor/upload.go index eea0d3a..b5b2cea 100644 --- a/modules/manager/contractor/upload.go +++ b/modules/manager/contractor/upload.go @@ -17,6 +17,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/proto" "github.com/montanaflynn/stats" + "go.uber.org/zap" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" @@ -245,7 +246,7 @@ func (c *Contractor) managedUploadObject(r io.Reader, rpk types.PublicKey, bucke // Fetch necessary params. c.mu.RLock() - bh := c.blockHeight + bh := c.tip.Height c.mu.RUnlock() contracts := c.staticContracts.ByRenter(rpk) @@ -304,7 +305,7 @@ func (c *Contractor) managedUploadPackedSlab(rpk types.PublicKey, data []byte, k // Fetch the renter. c.mu.RLock() - bh := c.blockHeight + bh := c.tip.Height renter, exists := c.renters[rpk] c.mu.RUnlock() if !exists { @@ -625,7 +626,7 @@ func (mgr *uploadManager) renewUploader(u *uploader) { // Remove the uploader if we can't renew it. mgr.mu.Lock() if !exists || !ok { - mgr.contractor.log.Printf("ERROR: failed to fetch renewed contract for uploader %v\n", fcid) + mgr.contractor.log.Error("failed to fetch renewed contract for uploader", zap.Stringer("fcid", fcid)) for i := 0; i < len(mgr.uploaders); i++ { if mgr.uploaders[i] == u { mgr.uploaders = append(mgr.uploaders[:i], mgr.uploaders[i+1:]...) @@ -902,7 +903,7 @@ func (u *upload) uploadShards(ctx context.Context, shards [][]byte, nextSlabChan // Relaunch non-overdrive uploads. if !done && resp.err != nil && !resp.req.overdrive { if overdriving, err := slab.launch(resp.req); err != nil { - u.mgr.contractor.log.Println("ERROR: failed to relaunch a sector upload:", err) + u.mgr.contractor.log.Error("failed to relaunch a sector upload", zap.Error(err)) if !overdriving { break // Fail the upload. 
} diff --git a/modules/manager/contractor/uptime.go b/modules/manager/contractor/uptime.go index f0444a4..bf3a748 100644 --- a/modules/manager/contractor/uptime.go +++ b/modules/manager/contractor/uptime.go @@ -32,7 +32,7 @@ func isOffline(host modules.HostDBEntry) bool { } // Otherwise we use the last 2 scans. This way a short connectivity problem // won't mark the host as offline. - success1 := host.ScanHistory[len(host.ScanHistory) - 1].Success - success2 := host.ScanHistory[len(host.ScanHistory) - 2].Success + success1 := host.ScanHistory[len(host.ScanHistory)-1].Success + success2 := host.ScanHistory[len(host.ScanHistory)-2].Success return !(success1 || success2) } diff --git a/modules/manager/contractor/utility.go b/modules/manager/contractor/utility.go index 2a4a03d..aa8ba2d 100644 --- a/modules/manager/contractor/utility.go +++ b/modules/manager/contractor/utility.go @@ -2,8 +2,10 @@ package contractor import ( "errors" + "fmt" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" ) @@ -79,7 +81,7 @@ func (c *Contractor) managedMarkContractUtility(contract modules.RenterContract, host, u, needsUpdate := c.managedHostInHostDBCheck(contract) if needsUpdate { if err := c.managedUpdateContractUtility(sc, u); err != nil { - c.log.Println("ERROR: unable to acquire and update contract utility:", err) + c.log.Error("unable to acquire and update contract utility", zap.Error(err)) return modules.AddContext(err, "unable to update utility after hostdb check") } return nil @@ -90,7 +92,7 @@ func (c *Contractor) managedMarkContractUtility(contract modules.RenterContract, if needsUpdate { err := c.managedUpdateContractUtility(sc, u) if err != nil { - c.log.Println("ERROR: unable to acquire and update contract utility:", err) + c.log.Error("unable to acquire and update contract utility", zap.Error(err)) return modules.AddContext(err, "unable to update utility after criticalUtilityChecks") } return nil @@ -98,7 +100,7 @@ func (c 
*Contractor) managedMarkContractUtility(contract modules.RenterContract, sb, err := c.hdb.ScoreBreakdown(host) if err != nil { - c.log.Println("ERROR: unable to get ScoreBreakdown for", host.PublicKey.String(), "got err:", err) + c.log.Error(fmt.Sprintf("unable to get ScoreBreakdown for %v", host.PublicKey), zap.Error(err)) return nil // It may just be this host that has an issue. } @@ -107,7 +109,7 @@ func (c *Contractor) managedMarkContractUtility(contract modules.RenterContract, if utilityUpdateStatus == necessaryUtilityUpdate || utilityUpdateStatus == suggestedUtilityUpdate { err = c.managedUpdateContractUtility(sc, u) if err != nil { - c.log.Println("ERROR: unable to acquire and update contract utility:", err) + c.log.Error("unable to acquire and update contract utility", zap.Error(err)) return modules.AddContext(err, "unable to update utility after checkHostScore") } return nil @@ -115,7 +117,7 @@ func (c *Contractor) managedMarkContractUtility(contract modules.RenterContract, // All checks passed, marking contract as GFU and GFR. if !u.GoodForUpload || !u.GoodForRenew { - c.log.Println("INFO: marking contract as being both GoodForUpload and GoodForRenew:", u.GoodForUpload, u.GoodForRenew, contract.ID) + c.log.Info("marking contract as being both GoodForUpload and GoodForRenew", zap.Stringer("fcid", contract.ID), zap.Bool("GFU", u.GoodForUpload), zap.Bool("GFR", u.GoodForRenew)) } u.GoodForUpload = true u.GoodForRenew = true @@ -123,7 +125,7 @@ func (c *Contractor) managedMarkContractUtility(contract modules.RenterContract, // Apply changes. 
err = c.managedUpdateContractUtility(sc, u) if err != nil { - c.log.Println("ERROR: unable to acquire and update contract utility:", err) + c.log.Error("unable to acquire and update contract utility", zap.Error(err)) return modules.AddContext(err, "unable to update utility after all checks passed.") } diff --git a/modules/manager/contractor/watchdog.go b/modules/manager/contractor/watchdog.go index deff29b..ae0b29b 100644 --- a/modules/manager/contractor/watchdog.go +++ b/modules/manager/contractor/watchdog.go @@ -6,8 +6,10 @@ import ( "sync" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) // Key Assumptions: @@ -50,7 +52,6 @@ type watchdog struct { renewWindows map[types.PublicKey]uint64 blockHeight uint64 - tpool modules.TransactionPool contractor *Contractor mu sync.Mutex @@ -111,9 +112,8 @@ func newWatchdog(contractor *Contractor) *watchdog { outputDependencies: make(map[types.SiacoinOutputID]map[types.FileContractID]struct{}), renewWindows: make(map[types.PublicKey]uint64), - blockHeight: contractor.blockHeight, + blockHeight: contractor.tip.Height, - tpool: contractor.tpool, contractor: contractor, } } @@ -143,12 +143,12 @@ func (w *watchdog) callMonitorContract(args monitorContractArgs) error { defer w.mu.Unlock() if _, ok := w.contracts[args.fcID]; ok { - w.contractor.log.Println("WARN: watchdog asked to watch contract it already knowns: ", args.fcID) + w.contractor.log.Warn("watchdog asked to watch contract it already knows", zap.Stringer("id", args.fcID)) return errAlreadyWatchingContract } if len(args.revisionTxn.FileContractRevisions) == 0 { - w.contractor.log.Println("ERROR: no revisions in revisiontxn", args) + w.contractor.log.Error("no revisions in revisiontxn") return errors.New("no revision in monitor contract args") } @@ -157,7 +157,7 @@ func (w *watchdog) callMonitorContract(args monitorContractArgs) error { saneInputs = saneInputs && len(args.sweepTxn.SiacoinInputs) 
!= 0 saneInputs = saneInputs && args.blockHeight != 0 if !saneInputs { - w.contractor.log.Critical("bad args given for contract: ", args) + w.contractor.log.Error("bad args given for contract") return errors.New("bad args for non recovered contract") } @@ -178,35 +178,39 @@ func (w *watchdog) callMonitorContract(args monitorContractArgs) error { w.addOutputDependency(oid, args.fcID) } - w.contractor.log.Println("INFO: monitoring contract: ", args.fcID) + w.contractor.log.Info("monitoring contract", zap.Stringer("id", args.fcID)) return nil } -// callScanConsensusChange scans applied and reverted blocks, updating the -// watchdog's state with all information relevant to monitored contracts. -func (w *watchdog) callScanConsensusChange(cc modules.ConsensusChange) { +// callScanApplyUpdate scans applied blocks, updating the watchdog's state +// with all information relevant to monitored contracts. +func (w *watchdog) callScanApplyUpdate(cau chain.ApplyUpdate) { w.mu.Lock() defer w.mu.Unlock() - for _, block := range cc.RevertedBlocks { - if block.ID() != modules.GenesisID { - w.blockHeight-- - } - w.scanRevertedBlock(block) + + if cau.Block.ID() != cau.State.Network.GenesisState().Index.ID { + w.blockHeight++ } + w.scanAppliedBlock(cau.Block) +} - for _, block := range cc.AppliedBlocks { - if block.ID() != modules.GenesisID { - w.blockHeight++ - } - w.scanAppliedBlock(block) +// callScanRevertUpdate scans reverted blocks, updating the watchdog's state +// with all information relevant to monitored contracts. +func (w *watchdog) callScanRevertUpdate(cru chain.RevertUpdate) { + w.mu.Lock() + defer w.mu.Unlock() + + if cru.Block.ID() != cru.State.Network.GenesisState().Index.ID { + w.blockHeight-- } + w.scanRevertedBlock(cru.Block) } // sendTxnSet broadcasts a transaction set and logs errors that are not // duplicate transaction errors. (This is because the watchdog may be // overzealous in sending out transactions). 
func (w *watchdog) sendTxnSet(txnSet []types.Transaction, reason string) { - w.contractor.log.Println("INFO: sending txn set to tpool:", reason) + w.contractor.log.Info("sending txn set to txpool", zap.String("reason", reason)) // Send the transaction set in a go-routine to avoid deadlock when this // sendTxnSet is called within ProcessConsensusChange. @@ -217,9 +221,11 @@ func (w *watchdog) sendTxnSet(txnSet []types.Transaction, reason string) { } defer w.contractor.tg.Done() - err = w.tpool.AcceptTransactionSet(txnSet) - if err != nil && !modules.ContainsError(err, modules.ErrDuplicateTransactionSet) { - w.contractor.log.Println("ERROR: watchdog send transaction error: " + reason, err) + _, err = w.contractor.cm.AddPoolTransactions(txnSet) + if err != nil { + w.contractor.log.Error("watchdog send transaction error", zap.String("reason", reason), zap.Error(err)) + } else { + w.contractor.s.BroadcastTransactionSet(txnSet) } }() } @@ -227,7 +233,7 @@ func (w *watchdog) sendTxnSet(txnSet []types.Transaction, reason string) { // archiveContract archives the file contract. Include a non-zero double spend // height if the reason for archival is that the contract was double-spent. 
func (w *watchdog) archiveContract(fcID types.FileContractID, doubleSpendHeight uint64) { - w.contractor.log.Println("INFO: archiving contract: ", fcID) + w.contractor.log.Info("archiving contract", zap.Stringer("id", fcID)) contractData, ok := w.contracts[fcID] if !ok { return @@ -258,13 +264,13 @@ func (w *watchdog) addOutputDependency(outputID types.SiacoinOutputID, fcID type func (w *watchdog) removeOutputDependency(outputID types.SiacoinOutputID, fcID types.FileContractID) { dependentFCs, ok := w.outputDependencies[outputID] if !ok { - w.contractor.log.Printf("ERROR: unable to remove output dependency: outputID not found in outputDependencies: outputID: %v\n", outputID) + w.contractor.log.Error("unable to remove output dependency: outputID not found in outputDependencies", zap.Stringer("outputID", outputID)) return } _, foundContract := dependentFCs[fcID] if !foundContract { - w.contractor.log.Printf("ERROR: unable to remove output dependency: FileContract not marked in outputDependencies: fcID: %v, outputID: %v\n", fcID, outputID) + w.contractor.log.Error("unable to remove output dependency: FileContract not marked in outputDependencies", zap.Stringer("fcid", fcID), zap.Stringer("outputID", outputID)) return } @@ -321,7 +327,7 @@ func removeTxnFromSet(txn types.Transaction, txnSet []types.Transaction) ([]type for i, txnFromSet := range txnSet { if txnFromSet.ID() == txnID { // Create the new set without the txn. - newSet := append(txnSet[:i], txnSet[i + 1:]...) + newSet := append(txnSet[:i], txnSet[i+1:]...) 
return newSet, nil } } @@ -342,14 +348,14 @@ func (w *watchdog) scanAppliedBlock(block types.Block) { fcID := txn.FileContractID(i) if contractData, ok := w.contracts[fcID]; ok { contractData.contractFound = true - w.contractor.log.Println("INFO: found contract: ", fcID) + w.contractor.log.Info("found contract", zap.Stringer("id", fcID)) } } for num, rev := range txn.FileContractRevisions { if contractData, ok := w.contracts[rev.ParentID]; ok { contractData.revisionFound = rev.RevisionNumber - w.contractor.log.Println("INFO: found revision for: ", rev.ParentID, rev.RevisionNumber) + w.contractor.log.Info("found revision", zap.Stringer("fcid", rev.ParentID), zap.Uint64("number", rev.RevisionNumber)) // Look for the revision signatures. sigs := make([]types.TransactionSignature, 2) for _, sig := range txn.Signatures { @@ -367,7 +373,7 @@ func (w *watchdog) scanAppliedBlock(block types.Block) { for _, storageProof := range txn.StorageProofs { if contractData, ok := w.contracts[storageProof.ParentID]; ok { contractData.storageProofFound = w.blockHeight - w.contractor.log.Println("INFO: found storage proof: ", storageProof.ParentID) + w.contractor.log.Info("found storage proof", zap.Stringer("fcid", storageProof.ParentID)) } } @@ -400,7 +406,7 @@ func (w *watchdog) findDependencySpends(txn types.Transaction) { // double-spends any inputs for the formation transaction set. _, ok := w.contracts[fcID] if !ok { - w.contractor.log.Critical("found dependency on un-monitored formation") + w.contractor.log.Error("found dependency on un-monitored formation") continue } spendsMonitoredOutput = true @@ -423,7 +429,7 @@ func (w *watchdog) findDependencySpends(txn types.Transaction) { // Try removing this transaction from the set. 
prunedFormationTxnSet, err := removeTxnFromSet(txn, txnSet) if err != nil { - w.contractor.log.Println("ERROR: Error removing txn from set, inputs were double-spent:", err, fcID, len(txnSet), txn.ID()) + w.contractor.log.Error("error removing txn from set, inputs were double-spent", zap.Error(err), zap.Stringer("fcid", fcID), zap.Int("transactions", len(txnSet)), zap.Stringer("txid", txn.ID())) // Signal to the contractor that this contract's inputs were // double-spent and that it should be removed. @@ -432,7 +438,7 @@ func (w *watchdog) findDependencySpends(txn types.Transaction) { continue } - w.contractor.log.Println("INFO: removed transaction from set for: ", fcID, len(prunedFormationTxnSet), txn.ID()) + w.contractor.log.Info("removed transaction from set", zap.Stringer("fcid", fcID), zap.Int("transactions", len(prunedFormationTxnSet)), zap.Stringer("txid", txn.ID())) contractData.formationTxnSet = prunedFormationTxnSet // Get the new set of parent output IDs. @@ -466,7 +472,7 @@ func (w *watchdog) findDependencySpends(txn types.Transaction) { // of monitored contracts and also for the creation of any new dependencies for // monitored formation transaction sets. func (w *watchdog) scanRevertedBlock(block types.Block) { - w.contractor.log.Println("INFO: watchdog scanning reverted block at height: ", w.blockHeight) + w.contractor.log.Info("watchdog scanning reverted block", zap.Uint64("height", w.blockHeight)) outputsCreatedInBlock := make(map[types.SiacoinOutputID]*types.Transaction) for i := 0; i < len(block.Transactions); i++ { @@ -488,18 +494,18 @@ func (w *watchdog) scanRevertedBlock(block types.Block) { continue } - w.contractor.log.Println("INFO: contract formation txn reverted: ", fcID) + w.contractor.log.Info("contract formation txn reverted", zap.Stringer("fcid", fcID)) contractData.contractFound = false // Set watchheight to max(current watch height, current height + leeway). 
- if contractData.formationSweepHeight < w.blockHeight + reorgLeeway { + if contractData.formationSweepHeight < w.blockHeight+reorgLeeway { contractData.formationSweepHeight = w.blockHeight + reorgLeeway } // Sanity check: if the contract was previously confirmed, it should have // been removed from the formationTxnSet. if len(contractData.formationTxnSet) != 0 { - w.contractor.log.Critical("found reverted contract with non-empty formationTxnSet in watchdog", fcID) + w.contractor.log.Error("found reverted contract with non-empty formationTxnSet in watchdog", zap.Stringer("fcid", fcID)) } // Re-add the file contract transaction to the formationTxnSet. @@ -512,14 +518,14 @@ func (w *watchdog) scanRevertedBlock(block types.Block) { for _, rev := range txn.FileContractRevisions { if contractData, ok := w.contracts[rev.ParentID]; ok { - w.contractor.log.Println("INFO: revision for monitored contract reverted: ", rev.ParentID, rev.RevisionNumber) + w.contractor.log.Info("revision for monitored contract reverted", zap.Stringer("fcid", rev.ParentID), zap.Uint64("number", rev.RevisionNumber)) contractData.revisionFound = 0 // There are no zero revisions. } } for _, storageProof := range txn.StorageProofs { if contractData, ok := w.contracts[storageProof.ParentID]; ok { - w.contractor.log.Println("INFO: storage proof for monitored contract reverted: ", storageProof.ParentID) + w.contractor.log.Info("storage proof for monitored contract reverted", zap.Stringer("fcid", storageProof.ParentID)) contractData.storageProofFound = 0 } } @@ -559,7 +565,7 @@ func (w *watchdog) updateDependenciesFromRevertedBlock(createdOutputs map[types. } // Add the new dependencies to file contracts dependent on this output. 
for fcID := range dependentFCs { - w.contractor.log.Println("INFO: adding dependency to file contract:", fcID, txn.ID()) + w.contractor.log.Info("adding dependency to file contract", zap.Stringer("fcid", fcID), zap.Stringer("txid", txn.ID())) w.addDependencyToContractFormationSet(fcID, *txn) } // Queue up the parent outputs so that we can check if they are adding new @@ -582,7 +588,7 @@ func (w *watchdog) updateDependenciesFromRevertedBlock(createdOutputs map[types. } // Add the new dependencies to file contracts dependent on this output. for fcID := range dependentFCs { - w.contractor.log.Println("INFO: adding dependency to file contract:", fcID, txn.ID()) + w.contractor.log.Info("adding dependency to file contract", zap.Stringer("fcid", fcID), zap.Stringer("txid", txn.ID())) w.addDependencyToContractFormationSet(fcID, *txn) } // Queue up the parent outputs so that we can check if they are adding new @@ -648,28 +654,28 @@ func (w *watchdog) callCheckContracts() { // Check if the contract was moved to oldContracts. _, exists = w.contractor.staticContracts.OldContract(fcID) if !exists { - w.contractor.log.Printf("ERROR: contract %v not found by the watchdog\n", fcID) + w.contractor.log.Error("contract not found by the watchdog", zap.Stringer("fcid", fcID)) } w.archiveContract(fcID, 0) continue } renter, err := w.contractor.managedFindRenter(fcID) if err != nil { - w.contractor.log.Println("ERROR: renter not found by the watchdog") + w.contractor.log.Error("renter not found by the watchdog") continue } rw, exists := w.renewWindows[renter.PublicKey] if !exists { - w.contractor.log.Println("ERROR: renew window not found by the watchdog") + w.contractor.log.Error("renew window not found by the watchdog") continue } - - if (w.blockHeight >= contractData.windowStart - rw) && (contractData.revisionFound != 0) { + + if (w.blockHeight >= contractData.windowStart-rw) && (contractData.revisionFound != 0) { // Check if the most recent revision has appeared on-chain. 
If not send it // ourselves. Called in a go-routine because the contractor may be in // maintenance which can cause a deadlock because this function Acquires a // lock using the contractset. - w.contractor.log.Println("INFO: checking revision for monitored contract: ", fcID) + w.contractor.log.Info("checking revision for monitored contract", zap.Stringer("fcid", fcID)) go func(fcid types.FileContractID, bh uint64) { err := w.contractor.tg.Add() if err != nil { @@ -683,10 +689,10 @@ func (w *watchdog) callCheckContracts() { if w.blockHeight >= contractData.windowEnd { if contractData.storageProofFound == 0 { // TODO: penalize host / send signal back to watchee. - w.contractor.log.Println("INFO: didn't find proof", fcID) + w.contractor.log.Warn("didn't find proof", zap.Stringer("fcid", fcID)) } else { // TODO: ++ host / send signal back to watchee. - w.contractor.log.Println("INFO: did find proof", fcID) + w.contractor.log.Info("did find proof", zap.Stringer("fcid", fcID)) } w.archiveContract(fcID, 0) } @@ -703,21 +709,21 @@ func (w *watchdog) checkUnconfirmedContract(fcID types.FileContractID, contractD // anymore. 
var setSize int for _, txn := range contractData.formationTxnSet { - setSize += types.EncodedLen(txn) + setSize += modules.EncodedLen(txn) } - if setSize > modules.TransactionSetSizeLimit { - w.contractor.log.Println("UpdatedFormationTxnSet beyond set size limit", fcID) + if setSize > 250e3 { + w.contractor.log.Warn("updated FormationTxnSet beyond set size limit", zap.Stringer("fcid", fcID)) } - if (w.blockHeight >= contractData.formationSweepHeight) || (setSize > modules.TransactionSetSizeLimit) { - w.contractor.log.Println("Sweeping inputs: ", w.blockHeight, contractData.formationSweepHeight) + if (w.blockHeight >= contractData.formationSweepHeight) || (setSize > 250e3) { + w.contractor.log.Info("sweeping inputs", zap.Uint64("height", w.blockHeight), zap.Uint64("formation height", contractData.formationSweepHeight)) // TODO: Add parent transactions if the renter's own dependencies are // causing this to be triggered. w.sweepContractInputs(fcID, contractData) } else { // Try to broadcast the transaction set again. - debugStr := fmt.Sprintf("INFO: sending formation txn for contract with id: %v at h=%d wh=%d", fcID, w.blockHeight, contractData.formationSweepHeight) - w.contractor.log.Println(debugStr) + debugStr := fmt.Sprintf("sending formation txn for contract with id: %v at h=%d wh=%d", fcID, w.blockHeight, contractData.formationSweepHeight) + w.contractor.log.Info(debugStr) w.sendTxnSet(contractData.formationTxnSet, debugStr) } } @@ -741,13 +747,13 @@ func (w *watchdog) managedCheckMonitoredRevision(fcID types.FileContractID, heig lastRevisionTxn = contract.Metadata().Transaction w.contractor.staticContracts.Return(contract) } else { - w.contractor.log.Println("WARN: unable to acquire monitored contract from contractset", fcID) + w.contractor.log.Warn("unable to acquire monitored contract from contractset", zap.Stringer("fcid", fcID)) // Try old contracts. If the contract was renewed already it won't be in the // contractset. 
w.contractor.mu.RLock() contract, ok := w.contractor.staticContracts.OldContract(fcID) if !ok { - w.contractor.log.Println("ERROR: unable to acquire monitored contract from oldContracts", fcID) + w.contractor.log.Error("unable to acquire monitored contract from oldContracts", zap.Stringer("fcid", fcID)) w.contractor.mu.RUnlock() return } @@ -759,8 +765,8 @@ func (w *watchdog) managedCheckMonitoredRevision(fcID types.FileContractID, heig if lastRevNum > revNumFound { // NOTE: fee-bumping via CPFP (the watchdog will do this every block // until it sees the revision or the window has closed.) - debugStr := fmt.Sprintf("INFO: sending revision txn for contract with id: %v revNum: %d", fcID, lastRevNum) - w.contractor.log.Println(debugStr) + debugStr := fmt.Sprintf("sending revision txn for contract with id: %v revNum: %d", fcID, lastRevNum) + w.contractor.log.Info(debugStr) w.sendTxnSet([]types.Transaction{lastRevisionTxn}, debugStr) } } @@ -776,36 +782,44 @@ func (w *watchdog) sweepContractInputs(fcID types.FileContractID, contractData * txn, parents := contractData.sweepTxn, contractData.sweepParents toSign := w.contractor.wallet.MarkWalletInputs(txn) if len(toSign) == 0 { - w.contractor.log.Println("INFO: couldn't mark any owned inputs") + w.contractor.log.Info("couldn't mark any owned inputs") } // Get the size of the transaction set so far for fee calculation. - setSize := types.EncodedLen(txn) + setSize := modules.EncodedLen(txn) for _, parent := range parents { - setSize += types.EncodedLen(parent) + setSize += modules.EncodedLen(parent) } // Estimate a transaction fee and add it to the txn. - _, maxFee := w.tpool.FeeEstimation() - txnFee := maxFee.Mul64(uint64(setSize)) // Estimated transaction size in bytes. + fee := w.contractor.cm.RecommendedFee() + txnFee := fee.Mul64(uint64(setSize)) // Estimated transaction size in bytes. txn.MinerFees = append(txn.MinerFees, txnFee) // There can be refund outputs, but the last output is the one that is used to // sweep. 
numOuts := len(txn.SiacoinOutputs) if numOuts == 0 { - w.contractor.log.Println("ERROR: expected at least 1 output in sweepTxn", len(txn.SiacoinOutputs)) + w.contractor.log.Error("expected at least 1 output in sweepTxn", zap.Int("outputs", len(txn.SiacoinOutputs))) return } replacementOutput := types.SiacoinOutput{ - Value: txn.SiacoinOutputs[numOuts - 1].Value.Sub(txnFee), - Address: txn.SiacoinOutputs[numOuts - 1].Address, + Value: txn.SiacoinOutputs[numOuts-1].Value.Sub(txnFee), + Address: txn.SiacoinOutputs[numOuts-1].Address, } - txn.SiacoinOutputs[numOuts - 1] = replacementOutput + txn.SiacoinOutputs[numOuts-1] = replacementOutput - err := w.contractor.wallet.SignTransaction(&txn, toSign, modules.FullCoveredFields()) + for i, id := range toSign { + txn.Signatures = append(txn.Signatures, types.TransactionSignature{ + ParentID: id, + CoveredFields: types.CoveredFields{ + SiacoinInputs: []uint64{uint64(i)}, + }, + }) + } + err := w.contractor.wallet.Sign(w.contractor.cm.TipState(), &txn, toSign) if err != nil { - w.contractor.log.Println("ERROR: unable to sign sweep txn", fcID) + w.contractor.log.Error("unable to sign sweep txn", zap.Stringer("fcid", fcID), zap.Error(err)) return } diff --git a/modules/manager/contractor/watchdog_persist.go b/modules/manager/contractor/watchdog_persist.go index e85aceb..07d6710 100644 --- a/modules/manager/contractor/watchdog_persist.go +++ b/modules/manager/contractor/watchdog_persist.go @@ -55,4 +55,4 @@ func (fcs *fileContractStatus) DecodeFrom(d *types.Decoder) { } fcs.windowStart = d.ReadUint64() fcs.windowEnd = d.ReadUint64() -} +} \ No newline at end of file diff --git a/modules/manager/database.go b/modules/manager/database.go index 181a3cc..1b89e58 100644 --- a/modules/manager/database.go +++ b/modules/manager/database.go @@ -13,6 +13,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/rs/xid" + "go.uber.org/zap" "lukechampine.com/frand" "go.sia.tech/core/types" @@ -342,7 +343,7 @@ func (m *Manager) 
getEmailPreferences() error { buf := bytes.NewBuffer(b) d := types.NewDecoder(io.LimitedReader{R: buf, N: 24}) var threshold types.Currency - threshold.DecodeFrom(d) + (*types.V1Currency)(&threshold).DecodeFrom(d) m.email = email m.warnThreshold = threshold tx.Commit() @@ -356,7 +357,7 @@ func (m *Manager) setEmailPreferences(email string, threshold types.Currency) er var buf bytes.Buffer e := types.NewEncoder(&buf) - threshold.EncodeTo(e) + types.V1Currency(threshold).EncodeTo(e) e.Flush() _, err := m.db.Exec(` UPDATE mg_email @@ -389,11 +390,7 @@ func (m *Manager) sendWarning() { } // Check the wallet balance. - balance, _, _, err := m.wallet.ConfirmedBalance() - if err != nil { - m.log.Println("ERROR: couldn't retrieve wallet balance:", err) - return - } + balance, _, _ := m.wallet.ConfirmedBalance() if balance.Cmp(m.warnThreshold) >= 0 { return } @@ -401,13 +398,13 @@ func (m *Manager) sendWarning() { // Balance is low; check if a warning has been sent today. tx, err := m.db.Begin() if err != nil { - m.log.Println("ERROR: couldn't start transaction:", err) + m.log.Error("couldn't start transaction", zap.Error(err)) return } var timestamp uint64 err = tx.QueryRow("SELECT time_sent FROM mg_email WHERE id = 1").Scan(&timestamp) if err != nil { - m.log.Println("ERROR: couldn't retrieve timestamp:", err) + m.log.Error("couldn't retrieve timestamp", zap.Error(err)) tx.Rollback() return } @@ -427,7 +424,7 @@ func (m *Manager) sendWarning() { t := template.New("warning") t, err = t.Parse(warningTemplate) if err != nil { - m.log.Printf("ERROR: unable to parse HTML template: %v\n", err) + m.log.Error("unable to parse HTML template", zap.Error(err)) tx.Rollback() return } @@ -439,7 +436,7 @@ func (m *Manager) sendWarning() { }) err = m.ms.SendMail("Sia Satellite", m.email, "Warning: Balance Low", &b) if err != nil { - m.log.Println("ERROR: unable to send a warning:", err) + m.log.Error("unable to send warning", zap.Error(err)) tx.Rollback() return } @@ -451,13 +448,13 @@ func 
(m *Manager) sendWarning() { WHERE id = 1 `, time.Now().Unix()) if err != nil { - m.log.Println("ERROR: couldn't update database:", err) + m.log.Error("couldn't update database", zap.Error(err)) tx.Rollback() return } if err := tx.Commit(); err != nil { - m.log.Println("ERROR: couldn't commit the changes:", err) + m.log.Error("couldn't commit the changes", zap.Error(err)) } } @@ -691,7 +688,7 @@ func (m *Manager) DeleteBufferedFiles(pk types.PublicKey) error { WHERE renter_pk = ? `, pk[:]) if err != nil { - m.log.Println("ERROR: unable to query files:", err) + m.log.Error("unable to query files", zap.Error(err)) return modules.AddContext(err, "unable to query files") } @@ -702,7 +699,7 @@ func (m *Manager) DeleteBufferedFiles(pk types.PublicKey) error { var bucket, path []byte if err := rows.Scan(&name, &bucket, &path); err != nil { rows.Close() - m.log.Println("ERROR: unable to retrieve filename:", err) + m.log.Error("unable to retrieve filename", zap.Error(err)) return modules.AddContext(err, "unable to retrieve filename") } m.hostContractor.CancelUpload(pk, bucket, path) @@ -715,11 +712,11 @@ func (m *Manager) DeleteBufferedFiles(pk types.PublicKey) error { p := filepath.Join(m.BufferedFilesDir(), name) fi, err := os.Stat(p) if err != nil { - m.log.Println("ERROR: unable to get file size:", err) + m.log.Error("unable to get file size", zap.Error(err)) return modules.AddContext(err, "unable to get file size") } if err := os.Remove(p); err != nil { - m.log.Println("ERROR: unable to delete file:", err) + m.log.Error("unable to delete file", zap.Error(err)) return modules.AddContext(err, "unable to delete file") } m.mu.Lock() @@ -727,7 +724,7 @@ func (m *Manager) DeleteBufferedFiles(pk types.PublicKey) error { m.mu.Unlock() _, err = m.db.Exec("DELETE FROM ctr_uploads WHERE filename = ?", name) if err != nil { - m.log.Println("ERROR: unable to delete file record:", err) + m.log.Error("unable to delete file record", zap.Error(err)) return modules.AddContext(err, 
"unable to delete file record") } } @@ -778,7 +775,7 @@ func (m *Manager) DeleteMultipartUploads(pk types.PublicKey) error { // Make a list of file names. rows, err := m.db.Query("SELECT filename FROM ctr_parts WHERE renter_pk = ?", pk[:]) if err != nil { - m.log.Println("ERROR: unable to query files:", err) + m.log.Error("unable to query files", zap.Error(err)) return modules.AddContext(err, "unable to query files") } @@ -787,7 +784,7 @@ func (m *Manager) DeleteMultipartUploads(pk types.PublicKey) error { var name string if err := rows.Scan(&name); err != nil { rows.Close() - m.log.Println("ERROR: unable to retrieve filename:", err) + m.log.Error("unable to retrieve filename", zap.Error(err)) return modules.AddContext(err, "unable to retrieve filename") } names = append(names, name) @@ -799,11 +796,11 @@ func (m *Manager) DeleteMultipartUploads(pk types.PublicKey) error { p := filepath.Join(m.BufferedFilesDir(), name) fi, err := os.Stat(p) if err != nil { - m.log.Println("ERROR: unable to get file size:", err) + m.log.Error("unable to get file size", zap.Error(err)) return modules.AddContext(err, "unable to get file size") } if err := os.Remove(p); err != nil { - m.log.Println("ERROR: unable to delete file:", err) + m.log.Error("unable to delete file", zap.Error(err)) return modules.AddContext(err, "unable to delete file") } m.mu.Lock() @@ -811,7 +808,7 @@ func (m *Manager) DeleteMultipartUploads(pk types.PublicKey) error { m.mu.Unlock() _, err = m.db.Exec("DELETE FROM ctr_parts WHERE filename = ?", name) if err != nil { - m.log.Println("ERROR: unable to delete file record:", err) + m.log.Error("unable to delete file record", zap.Error(err)) return modules.AddContext(err, "unable to delete file record") } } @@ -819,7 +816,7 @@ func (m *Manager) DeleteMultipartUploads(pk types.PublicKey) error { // Delete multipart uploads. 
_, err = m.db.Exec("DELETE FROM ctr_multipart WHERE renter_pk = ?", pk[:]) if err != nil { - m.log.Println("ERROR: unable to delete multipart uploads:", err) + m.log.Error("unable to delete multipart uploads", zap.Error(err)) return modules.AddContext(err, "unable to delete multipart uploads") } @@ -905,7 +902,7 @@ func (m *Manager) DeleteMultipart(rpk types.PublicKey, id types.Hash256) error { func (m *Manager) managedPruneMultipartUploads() { tx, err := m.db.Begin() if err != nil { - m.log.Println("ERROR: unable to start transaction:", err) + m.log.Error("unable to start transaction", zap.Error(err)) return } @@ -916,7 +913,7 @@ func (m *Manager) managedPruneMultipartUploads() { WHERE created < ? `, uint64(time.Now().Add(-multipartUploadPruneThreshold).Unix())) if err != nil { - m.log.Println("ERROR: unable to query multipart uploads:", err) + m.log.Error("unable to query multipart uploads", zap.Error(err)) tx.Rollback() return } @@ -925,7 +922,7 @@ func (m *Manager) managedPruneMultipartUploads() { for rows.Next() { key := make([]byte, 32) if err := rows.Scan(&key); err != nil { - m.log.Println("ERROR: unable to get upload ID:", err) + m.log.Error("unable to get upload ID", zap.Error(err)) rows.Close() tx.Rollback() return @@ -941,7 +938,7 @@ func (m *Manager) managedPruneMultipartUploads() { for _, id := range ids { rows, err = tx.Query("SELECT filename FROM ctr_parts WHERE upload_id = ?", id[:]) if err != nil { - m.log.Println("ERROR: unable to query files:", err) + m.log.Error("unable to query files", zap.Error(err)) tx.Rollback() return } @@ -951,7 +948,7 @@ func (m *Manager) managedPruneMultipartUploads() { var name string if err := rows.Scan(&name); err != nil { rows.Close() - m.log.Println("ERROR: unable to retrieve filename:", err) + m.log.Error("unable to retrieve filename", zap.Error(err)) tx.Rollback() return } @@ -963,12 +960,12 @@ func (m *Manager) managedPruneMultipartUploads() { p := filepath.Join(m.BufferedFilesDir(), name) fi, err := os.Stat(p) 
if err != nil { - m.log.Println("ERROR: unable to get file size:", err) + m.log.Error("unable to get file size", zap.Error(err)) tx.Rollback() return } if err := os.Remove(p); err != nil { - m.log.Println("ERROR: unable to delete file:", err) + m.log.Error("unable to delete file", zap.Error(err)) tx.Rollback() return } @@ -977,7 +974,7 @@ func (m *Manager) managedPruneMultipartUploads() { m.mu.Unlock() res, err := tx.Exec("DELETE FROM ctr_parts WHERE filename = ?", name) if err != nil { - m.log.Println("ERROR: unable to delete file record:", err) + m.log.Error("unable to delete file record", zap.Error(err)) tx.Rollback() return } @@ -989,19 +986,19 @@ func (m *Manager) managedPruneMultipartUploads() { // Delete the multipart uploads. res, err := tx.Exec("DELETE FROM ctr_multipart WHERE created < ?", uint64(time.Now().Add(-multipartUploadPruneThreshold).Unix())) if err != nil { - m.log.Println("ERROR: unable to prune multipart uploads:", err) + m.log.Error("unable to prune multipart uploads", zap.Error(err)) tx.Rollback() return } if err := tx.Commit(); err != nil { - m.log.Println("ERROR: unable to commit transaction:", err) + m.log.Error("unable to commit transaction", zap.Error(err)) return } numUploads, _ := res.RowsAffected() if numUploads > 0 { - m.log.Printf("INFO: pruned %d multipart uploads with %d parts\n", numUploads, numParts) + m.log.Info(fmt.Sprintf("pruned %d multipart uploads with %d parts", numUploads, numParts)) } } diff --git a/modules/manager/hostdb/alert.go b/modules/manager/hostdb/alert.go deleted file mode 100644 index 215074b..0000000 --- a/modules/manager/hostdb/alert.go +++ /dev/null @@ -1,9 +0,0 @@ -package hostdb - -import "github.com/mike76-dev/sia-satellite/modules" - -// Alerts implements the modules.Alerter interface for the hostdb. It returns -// all alerts of the hostdb. 
-func (hdb *HostDB) Alerts() (crit, err, warn, info []modules.Alert) { - return hdb.staticAlerter.Alerts() -} diff --git a/modules/manager/hostdb/consts.go b/modules/manager/hostdb/consts.go index 8e06769..f4687b8 100644 --- a/modules/manager/hostdb/consts.go +++ b/modules/manager/hostdb/consts.go @@ -71,5 +71,5 @@ const ( // minScanSleep is the minimum amount of time that the hostdb will sleep // between performing scans of the hosts. - minScanSleep = time.Hour + 20 * time.Minute + minScanSleep = time.Hour + 20*time.Minute ) diff --git a/modules/manager/hostdb/database.go b/modules/manager/hostdb/database.go index 01be469..359cb95 100644 --- a/modules/manager/hostdb/database.go +++ b/modules/manager/hostdb/database.go @@ -2,10 +2,12 @@ package hostdb import ( "bytes" + "fmt" "io" "time" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" ) @@ -21,15 +23,15 @@ func (hdb *HostDB) initDB() error { return nil } _, err = hdb.db.Exec(`INSERT INTO hdb_info - (height, scan_complete, disable_ip_check, last_change, filter_mode) + (height, scan_complete, disable_ip_check, bid, filter_mode) VALUES (?, ?, ?, ?, ?) - `, 0, false, false, modules.ConsensusChangeBeginning[:], modules.HostDBDisableFilter) + `, 0, false, false, []byte{}, modules.HostDBDisableFilter) return err } // reset zeroes out the sync status of the database. 
func (hdb *HostDB) reset() error { - _, err := hdb.db.Exec("UPDATE hdb_info SET height = ?, last_change = ?", 0, modules.ConsensusChangeBeginning[:]) + _, err := hdb.db.Exec("UPDATE hdb_info SET height = ?, bid = ?", 0, []byte{}) return err } @@ -37,14 +39,14 @@ func (hdb *HostDB) reset() error { func (hdb *HostDB) loadDB() error { cc := make([]byte, 32) err := hdb.db.QueryRow(` - SELECT height, scan_complete, disable_ip_check, last_change, filter_mode + SELECT height, scan_complete, disable_ip_check, bid, filter_mode FROM hdb_info WHERE id = 1 - `).Scan(&hdb.blockHeight, &hdb.initialScanComplete, &hdb.disableIPViolationCheck, &cc, &hdb.filterMode) + `).Scan(&hdb.tip.Height, &hdb.initialScanComplete, &hdb.disableIPViolationCheck, &cc, &hdb.filterMode) if err != nil { return modules.AddContext(err, "couldn't load HostDB data") } - copy(hdb.lastChange[:], cc) + copy(hdb.tip.ID[:], cc) // Load filtered hosts. rows, err := hdb.db.Query("SELECT public_key FROM hdb_fhosts") @@ -97,7 +99,7 @@ func (hdb *HostDB) loadDB() error { copy(ci.RenterPublicKey[:], rpk) copy(ci.HostPublicKey[:], hpk) ci.StoredData = stored - contracts, _ := hdb.knownContracts[ci.HostPublicKey.String()] + contracts := hdb.knownContracts[ci.HostPublicKey.String()] contracts = append(contracts, ci) hdb.knownContracts[ci.HostPublicKey.String()] = contracts } @@ -110,9 +112,9 @@ func (hdb *HostDB) loadDB() error { func (hdb *HostDB) updateState() error { _, err := hdb.db.Exec(` UPDATE hdb_info - SET height = ?, scan_complete = ?, disable_ip_check = ?, last_change = ? + SET height = ?, scan_complete = ?, disable_ip_check = ?, bid = ? 
WHERE id = 1 - `, hdb.blockHeight, hdb.initialScanComplete, hdb.disableIPViolationCheck, hdb.lastChange[:]) + `, hdb.tip.Height, hdb.initialScanComplete, hdb.disableIPViolationCheck, hdb.tip.ID[:]) return err } @@ -310,7 +312,7 @@ func (hdb *HostDB) saveKnownContracts() error { func (hdb *HostDB) threadedLoadHosts() { err := hdb.tg.Add() if err != nil { - hdb.staticLog.Println("ERROR: couldn't start hostdb threadgroup:", err) + hdb.log.Error("couldn't start hostdb threadgroup", zap.Error(err)) return } defer hdb.tg.Done() @@ -321,7 +323,7 @@ func (hdb *HostDB) threadedLoadHosts() { // Load the scan history. scanRows, err := hdb.db.Query("SELECT public_key, time, success FROM hdb_scanhistory") if err != nil { - hdb.staticLog.Println("ERROR: could not load the scan history:", err) + hdb.log.Error("could not load the scan history", zap.Error(err)) return } @@ -337,7 +339,7 @@ func (hdb *HostDB) threadedLoadHosts() { var success bool pkBytes := make([]byte, 32) if err := scanRows.Scan(&pkBytes, &timestamp, &success); err != nil { - hdb.staticLog.Println("ERROR: could not load the scan history:", err) + hdb.log.Error("could not load the scan history", zap.Error(err)) continue } var pk types.PublicKey @@ -353,7 +355,7 @@ func (hdb *HostDB) threadedLoadHosts() { // Load the IP subnets. ipRows, err := hdb.db.Query("SELECT public_key, ip_net FROM hdb_ipnets") if err != nil { - hdb.staticLog.Println("ERROR: could not load the IP subnets:", err) + hdb.log.Error("could not load the IP subnets", zap.Error(err)) return } @@ -368,7 +370,7 @@ func (hdb *HostDB) threadedLoadHosts() { var ip string pkBytes := make([]byte, 32) if err := ipRows.Scan(&pkBytes, &ip); err != nil { - hdb.staticLog.Println("ERROR: could not load the IP subnets:", err) + hdb.log.Error("could not load the IP subnets", zap.Error(err)) continue } var pk types.PublicKey @@ -381,7 +383,7 @@ func (hdb *HostDB) threadedLoadHosts() { // Load the hosts. 
rows, err := hdb.db.Query("SELECT public_key, filtered, bytes FROM hdb_hosts") if err != nil { - hdb.staticLog.Println("ERROR: could not load the hosts:", err) + hdb.log.Error("could not load the hosts", zap.Error(err)) return } @@ -398,7 +400,7 @@ func (hdb *HostDB) threadedLoadHosts() { var hostBytes []byte pkBytes := make([]byte, 32) if err := rows.Scan(&pkBytes, &filtered, &hostBytes); err != nil { - hdb.staticLog.Println("ERROR: could not load the host:", err) + hdb.log.Error("could not load the host", zap.Error(err)) continue } @@ -406,7 +408,7 @@ func (hdb *HostDB) threadedLoadHosts() { d := types.NewDecoder(io.LimitedReader{R: buf, N: int64(len(hostBytes))}) decodeHostEntry(&host, d) if err := d.Err(); err != nil { - hdb.staticLog.Println("ERROR: could not load the host:", err) + hdb.log.Error("could not load the host", zap.Error(err)) continue } copy(host.PublicKey[:], pkBytes) @@ -420,13 +422,13 @@ func (hdb *HostDB) threadedLoadHosts() { // that previously the FirstSeen values and the blockHeight values // could get out of sync. 
hdb.mu.Lock() - if hdb.blockHeight < host.FirstSeen { - host.FirstSeen = hdb.blockHeight + if hdb.tip.Height < host.FirstSeen { + host.FirstSeen = hdb.tip.Height } err := hdb.insert(host) if err != nil { - hdb.staticLog.Printf("ERROR: could not insert host %v into hosttree while loading: %v\n", host.Settings.NetAddress, err) + hdb.log.Error(fmt.Sprintf("could not insert host %v into hosttree while loading", host.Settings.NetAddress), zap.Error(err)) hdb.mu.Unlock() continue } diff --git a/modules/manager/hostdb/hostdb.go b/modules/manager/hostdb/hostdb.go index cc2c121..e672a73 100644 --- a/modules/manager/hostdb/hostdb.go +++ b/modules/manager/hostdb/hostdb.go @@ -20,8 +20,10 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/hostdb/hosttree" "github.com/mike76-dev/sia-satellite/persist" + "go.uber.org/zap" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) var ( @@ -29,11 +31,6 @@ var ( // allowed to be executed before the initial host scan has finished. ErrInitialScanIncomplete = errors.New("initial hostdb scan is not yet completed") - // Nil dependency errors. - errNilCS = errors.New("cannot create hostdb with nil consensus set") - errNilGateway = errors.New("cannot create hostdb with nil gateway") - errNilDB = errors.New("cannot create hostdb with nil database") - // errHostNotFoundInTree is returned when the host is not found in the // hosttree. errHostNotFoundInTree = errors.New("host not found in hosttree") @@ -123,7 +120,7 @@ func (bd *filteredDomains) managedIsFiltered(addr modules.NetAddress) bool { // we check is of the format domain.com. This is to protect the user // from accidentally submitting `com`, or some other TLD, and blocking // every host in the HostDB. 
- for i := 0; i < len(elements) - 1; i++ { + for i := 0; i < len(elements)-1; i++ { domainToCheck := strings.Join(elements[i:], ".") if _, blocked := bd.domains[domainToCheck]; blocked { return true @@ -144,14 +141,13 @@ type contractInfo struct { // for uploading files. type HostDB struct { // Dependencies. - db *sql.DB - cs modules.ConsensusSet - gateway modules.Gateway + db *sql.DB + cm *chain.Manager + s modules.Syncer - staticLog *persist.Logger - mu sync.RWMutex - staticAlerter *modules.GenericAlerter - tg siasync.ThreadGroup + log *zap.Logger + mu sync.RWMutex + tg siasync.ThreadGroup // knownContracts are contracts which the HostDB was informed about by the // Contractor. It contains infos about active contracts we have formed with @@ -179,7 +175,6 @@ type HostDB struct { scanMap map[string]struct{} scanWait bool scanningThreads int - synced bool loadingComplete bool // staticFilteredTree is a hosttree that only contains the hosts that align @@ -193,8 +188,7 @@ type HostDB struct { // filteredDomains tracks blocked domains for the hostdb. filteredDomains *filteredDomains - blockHeight uint64 - lastChange modules.ConsensusChangeID + tip types.ChainIndex } // Enforce that HostDB satisfies the modules.HostDB interface. @@ -274,13 +268,6 @@ func (hdb *HostDB) managedSetScoreFunction(sf hosttree.ScoreFunc) error { return err } -// managedSynced returns true if the hostdb is synced with the consensusset. -func (hdb *HostDB) managedSynced() bool { - hdb.mu.RLock() - defer hdb.mu.RUnlock() - return hdb.synced -} - // updateContracts rebuilds the knownContracts of the HostDB using the provided // contracts. 
func (hdb *HostDB) updateContracts(contracts []modules.RenterContract) { @@ -288,7 +275,7 @@ func (hdb *HostDB) updateContracts(contracts []modules.RenterContract) { knownContracts := make(map[string][]contractInfo) for _, contract := range contracts { if n := len(contract.Transaction.FileContractRevisions); n != 1 { - hdb.staticLog.Println("CRITICAL: contract's transaction should contain 1 revision but had ", n) + hdb.log.Error(fmt.Sprintf("contract's transaction should contain 1 revision but had %d", n)) continue } kc, exists := knownContracts[contract.HostPublicKey.String()] @@ -307,7 +294,7 @@ func (hdb *HostDB) updateContracts(contracts []modules.RenterContract) { kc = append(kc, contractInfo{ RenterPublicKey: contract.RenterPublicKey, HostPublicKey: contract.HostPublicKey, - StoredData: contract.Transaction.FileContractRevisions[0].Filesize, + StoredData: contract.Transaction.FileContractRevisions[0].Filesize, }) } knownContracts[contract.HostPublicKey.String()] = kc @@ -323,41 +310,29 @@ func (hdb *HostDB) updateContracts(contracts []modules.RenterContract) { // Update the set of known contracts in the hostdb, log if the number of // contracts has decreased. if len(hdb.knownContracts) > len(knownContracts) { - hdb.staticLog.Printf("INFO: hostdb is decreasing from %v known contracts to %v known contracts", len(hdb.knownContracts), len(knownContracts)) + hdb.log.Info(fmt.Sprintf("hostdb is decreasing from %v known contracts to %v known contracts", len(hdb.knownContracts), len(knownContracts))) } hdb.knownContracts = knownContracts // Save the hostdb to persist the update. err := hdb.saveKnownContracts() if err != nil { - hdb.staticLog.Println("ERROR: couldn't save set of known contracts:", err) + hdb.log.Error("couldn't save set of known contracts", zap.Error(err)) } } // hostdbBlockingStartup handles the blocking portion of New. 
-func hostdbBlockingStartup(db *sql.DB, g modules.Gateway, cs modules.ConsensusSet, dir string) (*HostDB, error) { - // Check for nil inputs. - if g == nil { - return nil, errNilGateway - } - if cs == nil { - return nil, errNilCS - } - if db == nil { - return nil, errNilDB - } - +func hostdbBlockingStartup(db *sql.DB, cm *chain.Manager, s modules.Syncer, dir string) (*HostDB, error) { // Create the HostDB object. hdb := &HostDB{ - db: db, - cs: cs, - gateway: g, + db: db, + cm: cm, + s: s, filteredDomains: newFilteredDomains(nil), filteredHosts: make(map[string]types.PublicKey), knownContracts: make(map[string][]contractInfo), scanMap: make(map[string]struct{}), - staticAlerter: modules.NewAlerter("hostdb"), } // Set the allowance and hostscore function. @@ -365,21 +340,18 @@ func hostdbBlockingStartup(db *sql.DB, g modules.Gateway, cs modules.ConsensusSe hdb.scoreFunc = hdb.managedCalculateHostScoreFn(hdb.allowance) // Create the logger. - logger, err := persist.NewFileLogger(filepath.Join(dir, "hostdb.log")) + logger, closeFn, err := persist.NewFileLogger(filepath.Join(dir, "hostdb.log")) if err != nil { return nil, err } - hdb.staticLog = logger + hdb.log = logger hdb.tg.AfterStop(func() { - if err := hdb.staticLog.Close(); err != nil { - // Resort to println as the logger is in an uncertain state. - fmt.Println("Failed to close the hostdb logger:", err) - } + closeFn() }) // The host tree is used to manage hosts and query them at random. The // filteredTree is used when whitelist or blacklist is enabled. - hdb.staticHostTree = hosttree.New(hdb.scoreFunc, hdb.staticLog) + hdb.staticHostTree = hosttree.New(hdb.scoreFunc, hdb.log) hdb.filteredTree = hdb.staticHostTree // Load the prior persistence structures. @@ -396,47 +368,56 @@ func hostdbBlockingStartup(db *sql.DB, g modules.Gateway, cs modules.ConsensusSe // Spawn the scan loop. 
go hdb.threadedScan() - hdb.tg.OnStop(func() { - cs.Unsubscribe(hdb) - }) return hdb, nil } // hostdbAsyncStartup handles the async portion of New. -func hostdbAsyncStartup(hdb *HostDB, cs modules.ConsensusSet) error { - err := cs.ConsensusSetSubscribe(hdb, hdb.lastChange, hdb.tg.StopChan()) - if modules.ContainsError(err, siasync.ErrStopped) { - return err - } - if modules.ContainsError(err, modules.ErrInvalidConsensusChangeID) { +func hostdbAsyncStartup(hdb *HostDB) error { + err := hdb.sync(hdb.tip) + if err != nil { // Subscribe again using the new ID. This will cause a triggered scan // on all of the hosts, but that should be acceptable. hdb.mu.Lock() - hdb.blockHeight = 0 - hdb.lastChange = modules.ConsensusChangeBeginning + hdb.tip = types.ChainIndex{} err = hdb.reset() hdb.mu.Unlock() if err != nil { return err } - err = cs.ConsensusSetSubscribe(hdb, hdb.lastChange, hdb.tg.StopChan()) + err = hdb.sync(hdb.tip) } - if modules.ContainsError(err, siasync.ErrStopped) { - return nil - } - if err != nil { - return err + return err +} + +func (hdb *HostDB) sync(index types.ChainIndex) error { + for index != hdb.cm.Tip() { + select { + case <-hdb.tg.StopChan(): + return nil + default: + } + crus, caus, err := hdb.cm.UpdatesSince(index, 100) + if err != nil { + hdb.log.Error("failed to subscribe to chain manager", zap.Error(err)) + return err + } else if err := hdb.UpdateChainState(crus, caus); err != nil { + hdb.log.Error("failed to update chain state", zap.Error(err)) + return err + } + if len(caus) > 0 { + index = caus[len(caus)-1].State.Index + } } return nil } // New returns a new HostDB. -func New(db *sql.DB, g modules.Gateway, cs modules.ConsensusSet, dir string) (*HostDB, <-chan error) { +func New(db *sql.DB, cm *chain.Manager, s modules.Syncer, dir string) (*HostDB, <-chan error) { errChan := make(chan error, 1) // Blocking startup. 
- hdb, err := hostdbBlockingStartup(db, g, cs, dir) + hdb, err := hostdbBlockingStartup(db, cm, s, dir) if err != nil { errChan <- err return nil, errChan @@ -451,7 +432,7 @@ func New(db *sql.DB, g modules.Gateway, cs modules.ConsensusSet, dir string) (*H } defer hdb.tg.Done() // Subscribe to the consensus set in a separate goroutine. - err := hostdbAsyncStartup(hdb, cs) + err := hostdbAsyncStartup(hdb) if err != nil { errChan <- err } @@ -476,7 +457,7 @@ func (hdb *HostDB) ActiveHosts() (activeHosts []modules.HostDBEntry, err error) if len(entry.ScanHistory) == 0 { continue } - if !entry.ScanHistory[len(entry.ScanHistory) - 1].Success { + if !entry.ScanHistory[len(entry.ScanHistory)-1].Success { continue } if !entry.Settings.AcceptingContracts { @@ -577,7 +558,7 @@ func (hdb *HostDB) Host(pk types.PublicKey) (modules.HostDBEntry, bool, error) { _, ok := filteredHosts[pk.String()] host.Filtered = whitelist != ok hdb.mu.RLock() - updateHostHistoricInteractions(&host, hdb.blockHeight) + updateHostHistoricInteractions(&host, hdb.tip.Height) hdb.mu.RUnlock() return host, exists, nil } @@ -617,10 +598,10 @@ func (hdb *HostDB) SetFilterMode(fm modules.FilterMode, hosts []types.PublicKey, for _, pk := range hdb.filteredHosts { err := hdb.staticHostTree.SetFiltered(pk, false) if err != nil { - hdb.staticLog.Println("ERROR: unable to mark entry as not filtered:", err) + hdb.log.Error("unable to mark entry as not filtered", zap.Error(err)) } if err := hdb.filterHost(pk, false); err != nil { - hdb.staticLog.Println("ERROR: unable to save unfiltered host:", err) + hdb.log.Error("unable to save unfiltered host", zap.Error(err)) } } // Reset filtered fields. @@ -638,7 +619,7 @@ func (hdb *HostDB) SetFilterMode(fm modules.FilterMode, hosts []types.PublicKey, } // Create filtered HostTree. 
- hdb.filteredTree = hosttree.New(hdb.scoreFunc, hdb.staticLog) + hdb.filteredTree = hosttree.New(hdb.scoreFunc, hdb.log) filteredDomains := newFilteredDomains(netAddresses) // Create filteredHosts map. @@ -653,10 +634,10 @@ func (hdb *HostDB) SetFilterMode(fm modules.FilterMode, hosts []types.PublicKey, // Update host in unfiltered hosttree. err := hdb.staticHostTree.SetFiltered(h, true) if err != nil { - hdb.staticLog.Println("ERROR: unable to mark entry as filtered:", err) + hdb.log.Error("unable to mark entry as filtered", zap.Error(err)) } if err := hdb.filterHost(h, true); err != nil { - hdb.staticLog.Println("ERROR: unable to save filtered host:", err) + hdb.log.Error("unable to save filtered host", zap.Error(err)) } } @@ -671,10 +652,10 @@ func (hdb *HostDB) SetFilterMode(fm modules.FilterMode, hosts []types.PublicKey, // Update host in unfiltered hosttree. err := hdb.staticHostTree.SetFiltered(host.PublicKey, true) if err != nil { - hdb.staticLog.Println("ERROR: unable to mark entry as filtered:", err) + hdb.log.Error("unable to mark entry as filtered", zap.Error(err)) } if err := hdb.filterHost(host.PublicKey, true); err != nil { - hdb.staticLog.Println("ERROR: unable to save filtered host:", err) + hdb.log.Error("unable to save filtered host", zap.Error(err)) } } for _, host := range allHosts { @@ -705,7 +686,7 @@ func (hdb *HostDB) InitialScanComplete() (complete bool, height uint64, err erro hdb.mu.Lock() defer hdb.mu.Unlock() complete = hdb.initialScanComplete - height = hdb.blockHeight + height = hdb.tip.Height return } diff --git a/modules/manager/hostdb/hostentry.go b/modules/manager/hostdb/hostentry.go index 5d3e841..42099ab 100644 --- a/modules/manager/hostdb/hostentry.go +++ b/modules/manager/hostdb/hostentry.go @@ -40,14 +40,14 @@ func updateHostHistoricInteractions(host *modules.HostDBEntry, bh uint64) { // more than recentInteractionWeightLimit of the decay limit. 
rsi := float64(host.RecentSuccessfulInteractions) rfi := float64(host.RecentFailedInteractions) - if hsi + hfi > historicInteractionDecayLimit { - if rsi + rfi > recentInteractionWeightLimit * (hsi + hfi) { + if hsi+hfi > historicInteractionDecayLimit { + if rsi+rfi > recentInteractionWeightLimit*(hsi+hfi) { adjustment := recentInteractionWeightLimit * (hsi + hfi) / (rsi + rfi) rsi *= adjustment rfi *= adjustment } } else { - if rsi + rfi > recentInteractionWeightLimit * historicInteractionDecayLimit { + if rsi+rfi > recentInteractionWeightLimit*historicInteractionDecayLimit { adjustment := recentInteractionWeightLimit * historicInteractionDecayLimit / (rsi + rfi) rsi *= adjustment rfi *= adjustment @@ -57,8 +57,8 @@ func updateHostHistoricInteractions(host *modules.HostDBEntry, bh uint64) { hfi += rfi // Apply the decay of the rest of the blocks. - if passedTime > 1 && hsi + hfi > historicInteractionDecayLimit { - decay := math.Pow(historicInteractionDecay, float64(passedTime - 1)) + if passedTime > 1 && hsi+hfi > historicInteractionDecayLimit { + decay := math.Pow(historicInteractionDecay, float64(passedTime-1)) hsi *= decay hfi *= decay } @@ -91,7 +91,7 @@ func (hdb *HostDB) IncrementSuccessfulInteractions(key types.PublicKey) error { } // Update historic values if necessary. - updateHostHistoricInteractions(&host, hdb.blockHeight) + updateHostHistoricInteractions(&host, hdb.tip.Height) // Increment the successful interactions. host.RecentSuccessfulInteractions++ @@ -112,7 +112,7 @@ func (hdb *HostDB) IncrementFailedInteractions(key types.PublicKey) error { defer hdb.mu.Unlock() // If we are offline it probably wasn't the host's fault. - if !hdb.gateway.Online() { + if len(hdb.s.Peers()) == 0 { return nil } @@ -123,7 +123,7 @@ func (hdb *HostDB) IncrementFailedInteractions(key types.PublicKey) error { } // Update historic values if necessary. 
- updateHostHistoricInteractions(&host, hdb.blockHeight) + updateHostHistoricInteractions(&host, hdb.tip.Height) // Increment the failed interactions. host.RecentFailedInteractions++ diff --git a/modules/manager/hostdb/hostscore.go b/modules/manager/hostdb/hostscore.go index 34c04e8..64ce092 100644 --- a/modules/manager/hostdb/hostscore.go +++ b/modules/manager/hostdb/hostscore.go @@ -197,7 +197,7 @@ func (hdb *HostDB) ageScore(entry modules.HostDBEntry) float64 { {1, 3}, } - height := hdb.blockHeight + height := hdb.tip.Height age := (height - entry.FirstSeen) / modules.BlocksPerDay weight := 1.0 for _, w := range weights { @@ -346,7 +346,7 @@ func (hdb *HostDB) versionScore(entry modules.HostDBEntry) float64 { version string penalty float64 }{ - {"1.6.0", 0.99}, + {"1.6.0", 0.10}, {"1.5.9", 0.00}, } weight := 1.0 diff --git a/modules/manager/hostdb/hosttree/addressfilter.go b/modules/manager/hostdb/hosttree/addressfilter.go index 23a288c..952bb62 100644 --- a/modules/manager/hostdb/hosttree/addressfilter.go +++ b/modules/manager/hostdb/hosttree/addressfilter.go @@ -19,7 +19,7 @@ const ( // Filter filters host addresses which belong to the same subnet to // avoid selecting hosts from the same region. type Filter struct { - filter map[string]struct{} + filter map[string]struct{} } // NewFilter creates a new addressFilter object. diff --git a/modules/manager/hostdb/hosttree/hosttree.go b/modules/manager/hostdb/hosttree/hosttree.go index d6e522d..076c8f0 100644 --- a/modules/manager/hostdb/hosttree/hosttree.go +++ b/modules/manager/hostdb/hosttree/hosttree.go @@ -7,7 +7,7 @@ import ( "sync" "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" + "go.uber.org/zap" "go.sia.tech/core/types" @@ -41,7 +41,7 @@ type ( // scoreFn calculates the score of a hostEntry. 
scoreFn ScoreFunc - log *persist.Logger + log *zap.Logger mu sync.Mutex } @@ -63,12 +63,12 @@ type ( score types.Currency entry *hostEntry - log *persist.Logger + log *zap.Logger } ) // createNode creates a new node using the provided `parent` and `entry`. -func createNode(parent *node, entry *hostEntry, log *persist.Logger) *node { +func createNode(parent *node, entry *hostEntry, log *zap.Logger) *node { return &node{ parent: parent, score: entry.score, @@ -82,7 +82,7 @@ func createNode(parent *node, entry *hostEntry, log *persist.Logger) *node { } // New creates a new HostTree given a score function. -func New(sf ScoreFunc, log *persist.Logger) *HostTree { +func New(sf ScoreFunc, log *zap.Logger) *HostTree { return &HostTree{ hosts: make(map[string]*node), root: &node{ @@ -143,7 +143,7 @@ func (n *node) recursiveInsert(entry *hostEntry) (nodesAdded int, newnode *node) func (n *node) nodeAtScore(score types.Currency) *node { // Sanity check - score must be less than the total score of the tree. 
if score.Cmp(n.score) > 0 { - n.log.Println("CRITICAL: node score corruption") + n.log.Error("node score corruption") return nil } @@ -159,7 +159,7 @@ func (n *node) nodeAtScore(score types.Currency) *node { } if !n.taken { - n.log.Println("CRITICAL: node tree structure corruption") + n.log.Error("node tree structure corruption") return nil } @@ -347,14 +347,14 @@ func (ht *HostTree) SelectRandom(n int, blacklist, addressBlacklist []types.Publ randScoreBig := frand.BigIntn(ht.root.score.Big()) b := randScoreBig.Bytes() buf := make([]byte, 16) - copy(buf[16 - len(b):], b[:]) + copy(buf[16-len(b):], b[:]) randScore := types.NewCurrency(binary.BigEndian.Uint64(buf[8:]), binary.BigEndian.Uint64(buf[:8])) node := ht.root.nodeAtScore(randScore) scoreOne := types.NewCurrency64(1) if node.entry.Settings.AcceptingContracts && len(node.entry.ScanHistory) > 0 && - node.entry.ScanHistory[len(node.entry.ScanHistory) - 1].Success && + node.entry.ScanHistory[len(node.entry.ScanHistory)-1].Success && !filter.Filtered(modules.NetAddress(node.entry.Settings.NetAddress)) && node.entry.score.Cmp(scoreOne) > 0 { // The host must be online and accepting contracts to be returned diff --git a/modules/manager/hostdb/hosttree/scorebreakdown.go b/modules/manager/hostdb/hosttree/scorebreakdown.go index 2d45258..2534ccd 100644 --- a/modules/manager/hostdb/hosttree/scorebreakdown.go +++ b/modules/manager/hostdb/hosttree/scorebreakdown.go @@ -59,4 +59,4 @@ func (h HostAdjustments) Score() types.Currency { return types.NewCurrency64(1) } return score -} +} \ No newline at end of file diff --git a/modules/manager/hostdb/persist.go b/modules/manager/hostdb/persist.go index bc45de9..ce556e3 100644 --- a/modules/manager/hostdb/persist.go +++ b/modules/manager/hostdb/persist.go @@ -23,16 +23,16 @@ func encodeHostEntry(entry *modules.HostDBEntry, e *types.Encoder) { e.WriteUint64(entry.Settings.TotalStorage) entry.Settings.Address.EncodeTo(e) e.WriteUint64(entry.Settings.WindowSize) - 
entry.Settings.Collateral.EncodeTo(e) - entry.Settings.MaxCollateral.EncodeTo(e) - entry.Settings.BaseRPCPrice.EncodeTo(e) - entry.Settings.ContractPrice.EncodeTo(e) - entry.Settings.DownloadBandwidthPrice.EncodeTo(e) - entry.Settings.SectorAccessPrice.EncodeTo(e) - entry.Settings.StoragePrice.EncodeTo(e) - entry.Settings.UploadBandwidthPrice.EncodeTo(e) + types.V1Currency(entry.Settings.Collateral).EncodeTo(e) + types.V1Currency(entry.Settings.MaxCollateral).EncodeTo(e) + types.V1Currency(entry.Settings.BaseRPCPrice).EncodeTo(e) + types.V1Currency(entry.Settings.ContractPrice).EncodeTo(e) + types.V1Currency(entry.Settings.DownloadBandwidthPrice).EncodeTo(e) + types.V1Currency(entry.Settings.SectorAccessPrice).EncodeTo(e) + types.V1Currency(entry.Settings.StoragePrice).EncodeTo(e) + types.V1Currency(entry.Settings.UploadBandwidthPrice).EncodeTo(e) e.WriteUint64(uint64(entry.Settings.EphemeralAccountExpiry)) - entry.Settings.MaxEphemeralAccountBalance.EncodeTo(e) + types.V1Currency(entry.Settings.MaxEphemeralAccountBalance).EncodeTo(e) e.WriteUint64(entry.Settings.RevisionNumber) e.WriteString(entry.Settings.Version) e.WriteString(entry.Settings.SiaMuxPort) @@ -41,32 +41,32 @@ func encodeHostEntry(entry *modules.HostDBEntry, e *types.Encoder) { e.Write(entry.PriceTable.UID[:]) e.WriteUint64(uint64(entry.PriceTable.Validity)) e.WriteUint64(entry.PriceTable.HostBlockHeight) - entry.PriceTable.UpdatePriceTableCost.EncodeTo(e) - entry.PriceTable.AccountBalanceCost.EncodeTo(e) - entry.PriceTable.FundAccountCost.EncodeTo(e) - entry.PriceTable.LatestRevisionCost.EncodeTo(e) - entry.PriceTable.SubscriptionMemoryCost.EncodeTo(e) - entry.PriceTable.SubscriptionNotificationCost.EncodeTo(e) - entry.PriceTable.InitBaseCost.EncodeTo(e) - entry.PriceTable.MemoryTimeCost.EncodeTo(e) - entry.PriceTable.DownloadBandwidthCost.EncodeTo(e) - entry.PriceTable.UploadBandwidthCost.EncodeTo(e) - entry.PriceTable.DropSectorsBaseCost.EncodeTo(e) - 
entry.PriceTable.DropSectorsUnitCost.EncodeTo(e) - entry.PriceTable.HasSectorBaseCost.EncodeTo(e) - entry.PriceTable.ReadBaseCost.EncodeTo(e) - entry.PriceTable.ReadLengthCost.EncodeTo(e) - entry.PriceTable.RenewContractCost.EncodeTo(e) - entry.PriceTable.RevisionBaseCost.EncodeTo(e) - entry.PriceTable.SwapSectorBaseCost.EncodeTo(e) - entry.PriceTable.WriteBaseCost.EncodeTo(e) - entry.PriceTable.WriteLengthCost.EncodeTo(e) - entry.PriceTable.WriteStoreCost.EncodeTo(e) - entry.PriceTable.TxnFeeMinRecommended.EncodeTo(e) - entry.PriceTable.TxnFeeMaxRecommended.EncodeTo(e) - entry.PriceTable.ContractPrice.EncodeTo(e) - entry.PriceTable.CollateralCost.EncodeTo(e) - entry.PriceTable.MaxCollateral.EncodeTo(e) + types.V1Currency(entry.PriceTable.UpdatePriceTableCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.AccountBalanceCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.FundAccountCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.LatestRevisionCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.SubscriptionMemoryCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.SubscriptionNotificationCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.InitBaseCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.MemoryTimeCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.DownloadBandwidthCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.UploadBandwidthCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.DropSectorsBaseCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.DropSectorsUnitCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.HasSectorBaseCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.ReadBaseCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.ReadLengthCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.RenewContractCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.RevisionBaseCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.SwapSectorBaseCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.WriteBaseCost).EncodeTo(e) + 
types.V1Currency(entry.PriceTable.WriteLengthCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.WriteStoreCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.TxnFeeMinRecommended).EncodeTo(e) + types.V1Currency(entry.PriceTable.TxnFeeMaxRecommended).EncodeTo(e) + types.V1Currency(entry.PriceTable.ContractPrice).EncodeTo(e) + types.V1Currency(entry.PriceTable.CollateralCost).EncodeTo(e) + types.V1Currency(entry.PriceTable.MaxCollateral).EncodeTo(e) e.WriteUint64(entry.PriceTable.MaxDuration) e.WriteUint64(entry.PriceTable.WindowSize) e.WriteUint64(entry.PriceTable.RegistryEntriesLeft) @@ -98,16 +98,16 @@ func decodeHostEntry(entry *modules.HostDBEntry, d *types.Decoder) { entry.Settings.TotalStorage = d.ReadUint64() entry.Settings.Address.DecodeFrom(d) entry.Settings.WindowSize = d.ReadUint64() - entry.Settings.Collateral.DecodeFrom(d) - entry.Settings.MaxCollateral.DecodeFrom(d) - entry.Settings.BaseRPCPrice.DecodeFrom(d) - entry.Settings.ContractPrice.DecodeFrom(d) - entry.Settings.DownloadBandwidthPrice.DecodeFrom(d) - entry.Settings.SectorAccessPrice.DecodeFrom(d) - entry.Settings.StoragePrice.DecodeFrom(d) - entry.Settings.UploadBandwidthPrice.DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.Collateral).DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.MaxCollateral).DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.BaseRPCPrice).DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.ContractPrice).DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.DownloadBandwidthPrice).DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.SectorAccessPrice).DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.StoragePrice).DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.UploadBandwidthPrice).DecodeFrom(d) entry.Settings.EphemeralAccountExpiry = time.Duration(d.ReadUint64()) - entry.Settings.MaxEphemeralAccountBalance.DecodeFrom(d) + (*types.V1Currency)(&entry.Settings.MaxEphemeralAccountBalance).DecodeFrom(d) entry.Settings.RevisionNumber = d.ReadUint64() 
entry.Settings.Version = d.ReadString() entry.Settings.SiaMuxPort = d.ReadString() @@ -116,32 +116,32 @@ func decodeHostEntry(entry *modules.HostDBEntry, d *types.Decoder) { d.Read(entry.PriceTable.UID[:]) entry.PriceTable.Validity = time.Duration(d.ReadUint64()) entry.PriceTable.HostBlockHeight = d.ReadUint64() - entry.PriceTable.UpdatePriceTableCost.DecodeFrom(d) - entry.PriceTable.AccountBalanceCost.DecodeFrom(d) - entry.PriceTable.FundAccountCost.DecodeFrom(d) - entry.PriceTable.LatestRevisionCost.DecodeFrom(d) - entry.PriceTable.SubscriptionMemoryCost.DecodeFrom(d) - entry.PriceTable.SubscriptionNotificationCost.DecodeFrom(d) - entry.PriceTable.InitBaseCost.DecodeFrom(d) - entry.PriceTable.MemoryTimeCost.DecodeFrom(d) - entry.PriceTable.DownloadBandwidthCost.DecodeFrom(d) - entry.PriceTable.UploadBandwidthCost.DecodeFrom(d) - entry.PriceTable.DropSectorsBaseCost.DecodeFrom(d) - entry.PriceTable.DropSectorsUnitCost.DecodeFrom(d) - entry.PriceTable.HasSectorBaseCost.DecodeFrom(d) - entry.PriceTable.ReadBaseCost.DecodeFrom(d) - entry.PriceTable.ReadLengthCost.DecodeFrom(d) - entry.PriceTable.RenewContractCost.DecodeFrom(d) - entry.PriceTable.RevisionBaseCost.DecodeFrom(d) - entry.PriceTable.SwapSectorBaseCost.DecodeFrom(d) - entry.PriceTable.WriteBaseCost.DecodeFrom(d) - entry.PriceTable.WriteLengthCost.DecodeFrom(d) - entry.PriceTable.WriteStoreCost.DecodeFrom(d) - entry.PriceTable.TxnFeeMinRecommended.DecodeFrom(d) - entry.PriceTable.TxnFeeMaxRecommended.DecodeFrom(d) - entry.PriceTable.ContractPrice.DecodeFrom(d) - entry.PriceTable.CollateralCost.DecodeFrom(d) - entry.PriceTable.MaxCollateral.DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.UpdatePriceTableCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.AccountBalanceCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.FundAccountCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.LatestRevisionCost).DecodeFrom(d) + 
(*types.V1Currency)(&entry.PriceTable.SubscriptionMemoryCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.SubscriptionNotificationCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.InitBaseCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.MemoryTimeCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.DownloadBandwidthCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.UploadBandwidthCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.DropSectorsBaseCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.DropSectorsUnitCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.HasSectorBaseCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.ReadBaseCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.ReadLengthCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.RenewContractCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.RevisionBaseCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.SwapSectorBaseCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.WriteBaseCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.WriteLengthCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.WriteStoreCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.TxnFeeMinRecommended).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.TxnFeeMaxRecommended).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.ContractPrice).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.CollateralCost).DecodeFrom(d) + (*types.V1Currency)(&entry.PriceTable.MaxCollateral).DecodeFrom(d) entry.PriceTable.MaxDuration = d.ReadUint64() entry.PriceTable.WindowSize = d.ReadUint64() entry.PriceTable.RegistryEntriesLeft = d.ReadUint64() @@ -169,7 +169,7 @@ func (hdb *HostDB) load() error { } if len(hdb.filteredHosts) > 0 { - hdb.filteredTree = hosttree.New(hdb.scoreFunc, hdb.staticLog) + hdb.filteredTree = hosttree.New(hdb.scoreFunc, hdb.log) } // "Lazily" load the hosts into the host 
trees. diff --git a/modules/manager/hostdb/randomhosts.go b/modules/manager/hostdb/randomhosts.go index 50ee17f..745bbc3 100644 --- a/modules/manager/hostdb/randomhosts.go +++ b/modules/manager/hostdb/randomhosts.go @@ -39,7 +39,7 @@ func (hdb *HostDB) RandomHostsWithAllowance(n int, blacklist, addressBlacklist [ return []modules.HostDBEntry{}, ErrInitialScanIncomplete } // Create a temporary hosttree from the given allowance. - ht := hosttree.New(hdb.managedCalculateHostScoreFn(allowance), hdb.staticLog) + ht := hosttree.New(hdb.managedCalculateHostScoreFn(allowance), hdb.log) // Insert all known hosts. hdb.mu.RLock() diff --git a/modules/manager/hostdb/scan.go b/modules/manager/hostdb/scan.go index be04c1e..de50b87 100644 --- a/modules/manager/hostdb/scan.go +++ b/modules/manager/hostdb/scan.go @@ -14,6 +14,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/hostdb/hosttree" "github.com/mike76-dev/sia-satellite/modules/manager/proto" + "go.uber.org/zap" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" @@ -77,8 +78,8 @@ func (hdb *HostDB) queueScan(entry modules.HostDBEntry) { // Sanity check - the scan map and the scan list should have the same // length. - if len(hdb.scanMap) > len(hdb.scanList) + maxScanningThreads { - hdb.staticLog.Println("CRITICAL: the hostdb scan map has seemingly grown too large:", len(hdb.scanMap), len(hdb.scanList), maxScanningThreads) + if len(hdb.scanMap) > len(hdb.scanList)+maxScanningThreads { + hdb.log.Error("the hostdb scan map has seemingly grown too large", zap.Int("scanMap", len(hdb.scanMap)), zap.Int("scanList", len(hdb.scanList)), zap.Int("maxScanningThreads", maxScanningThreads)) } // Nobody is emptying the scan list, create and run a scan thread. @@ -171,7 +172,7 @@ func (hdb *HostDB) queueScan(entry modules.HostDBEntry) { // to keep this function in mind, and vice-versa. 
func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) { // If the scan failed because we don't have Internet access, toss out this update. - if netErr != nil && !hdb.gateway.Online() { + if netErr != nil && len(hdb.s.Peers()) == 0 { return } @@ -200,8 +201,8 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) { // Add two scans to the scan history. Two are needed because the scans // are forward looking, but we want this first scan to represent as // much as one week of uptime or downtime. - earliestStartTime := time.Now().Add(time.Hour * 2 * 24 * -1) // Permit up two days starting uptime or downtime. - suggestedStartTime := time.Now().Add(time.Minute * 10 * time.Duration(hdb.blockHeight - entry.FirstSeen + 1) * -1) // Add one to the FirstSeen in case FirstSeen is this block, guarantees incrementing order. + earliestStartTime := time.Now().Add(time.Hour * 2 * 24 * -1) // Permit up two days starting uptime or downtime. + suggestedStartTime := time.Now().Add(time.Minute * 10 * time.Duration(hdb.tip.Height-entry.FirstSeen+1) * -1) // Add one to the FirstSeen in case FirstSeen is this block, guarantees incrementing order. if suggestedStartTime.Before(earliestStartTime) { suggestedStartTime = earliestStartTime } @@ -213,10 +214,10 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) { // Do not add a new timestamp for the scan unless more than an hour has // passed since the previous scan. 
newTimestamp := time.Now() - prevTimestamp := newEntry.ScanHistory[len(newEntry.ScanHistory) - 1].Timestamp + prevTimestamp := newEntry.ScanHistory[len(newEntry.ScanHistory)-1].Timestamp if newTimestamp.After(prevTimestamp.Add(scanTimeElapsedRequirement)) { - if newEntry.ScanHistory[len(newEntry.ScanHistory) - 1].Success && netErr != nil { - hdb.staticLog.Printf("Host %v is being downgraded from an online host to an offline host: %v\n", newEntry.PublicKey.String(), netErr) + if newEntry.ScanHistory[len(newEntry.ScanHistory)-1].Success && netErr != nil { + hdb.log.Info(fmt.Sprintf("host %v is being downgraded from an online host to an offline host", newEntry.PublicKey.String()), zap.Error(netErr)) } newEntry.ScanHistory = append(newEntry.ScanHistory, modules.HostDBScan{Timestamp: newTimestamp, Success: netErr == nil}) } @@ -239,15 +240,15 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) { downPastMaxDowntime := time.Since(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime && !recentUptime if !haveContractWithHost && downPastMaxDowntime && len(newEntry.ScanHistory) >= minScans { if newEntry.HistoricUptime > 0 { - hdb.staticLog.Printf("Removing %v with historic uptime from hostdb. Recent downtime timestamp is %v. Hostdb knows about %v contracts.", newEntry.PublicKey.String(), newEntry.ScanHistory[0].Timestamp, len(cis)) + hdb.log.Info(fmt.Sprintf("removing %v with historic uptime from hostdb. Recent downtime timestamp is %v. Hostdb knows about %v contracts.", newEntry.PublicKey.String(), newEntry.ScanHistory[0].Timestamp, len(cis))) } // Remove the host from the hostdb. 
err := hdb.remove(newEntry.PublicKey) if err != nil { - hdb.staticLog.Println("ERROR: unable to remove host newEntry which has had a ton of downtime:", err) + hdb.log.Error("unable to remove host newEntry which has had a ton of downtime", zap.Error(err)) } if err = hdb.removeHost(newEntry); err != nil { - hdb.staticLog.Println("ERROR: unable to remove host from the database:", err) + hdb.log.Error("unable to remove host from the database", zap.Error(err)) } // The function should terminate here as no more interaction is needed @@ -256,7 +257,7 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) { } // Compress any old scans into the historic values. - for len(newEntry.ScanHistory) > minScans && time.Now().Sub(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime { + for len(newEntry.ScanHistory) > minScans && time.Since(newEntry.ScanHistory[0].Timestamp) > maxHostDowntime { timePassed := newEntry.ScanHistory[1].Timestamp.Sub(newEntry.ScanHistory[0].Timestamp) if newEntry.ScanHistory[0].Success { newEntry.HistoricUptime += timePassed @@ -271,20 +272,20 @@ func (hdb *HostDB) updateEntry(entry modules.HostDBEntry, netErr error) { // Insert into Hosttrees. err := hdb.insert(newEntry) if err != nil { - hdb.staticLog.Println("ERROR: unable to insert entry which is thought to be new:", err) + hdb.log.Error("unable to insert entry which is thought to be new", zap.Error(err)) } } else { // Modify hosttrees. 
err := hdb.modify(newEntry) if err != nil { - hdb.staticLog.Println("ERROR: unable to modify entry which is thought to exist:", err) + hdb.log.Error("unable to modify entry which is thought to exist", zap.Error(err)) } } if err := hdb.updateHost(newEntry); err != nil { - hdb.staticLog.Println("ERROR: unable to update the host in the database:", err) + hdb.log.Error("unable to update the host in the database", zap.Error(err)) } if err := hdb.updateScanHistory(newEntry); err != nil { - hdb.staticLog.Println("ERROR: unable to update the scan history:", err) + hdb.log.Error("unable to update the scan history", zap.Error(err)) } } @@ -336,12 +337,12 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { entry.LastIPNetChange = time.Now() } if err != nil { - hdb.staticLog.Println("ERROR: managedScanHost: failed to look up IP nets", err) + hdb.log.Error("managedScanHost: failed to look up IP nets", zap.Error(err)) } // Update historic interactions of entry if necessary. hdb.mu.Lock() - updateHostHistoricInteractions(&entry, hdb.blockHeight) + updateHostHistoricInteractions(&entry, hdb.tip.Height) // We don't want to override the NetAddress during a scan so we need to // retrieve the most recent NetAddress from the tree first. @@ -355,14 +356,14 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { timeout := hostRequestTimeout hdb.mu.RLock() if len(hdb.initialScanLatencies) > minScansForSpeedup { - hdb.staticLog.Println("CRITICAL: initialScanLatencies should never be greater than minScansForSpeedup") + hdb.log.Error("initialScanLatencies should never be greater than minScansForSpeedup") } if !hdb.initialScanComplete && len(hdb.initialScanLatencies) == minScansForSpeedup { // During an initial scan, when we have at least minScansForSpeedup // active scans in initialScanLatencies, we use // 5*median(initialScanLatencies) as the new hostRequestTimeout to // speedup the scanning process. 
- timeout = hdb.initialScanLatencies[len(hdb.initialScanLatencies) / 2] + timeout = hdb.initialScanLatencies[len(hdb.initialScanLatencies)/2] timeout *= scanSpeedupMedianMultiplier if hostRequestTimeout < timeout { timeout = hostRequestTimeout @@ -371,7 +372,7 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { hdb.mu.RUnlock() // Create a context and set up its cancelling. - ctx, cancel := context.WithTimeout(context.Background(), timeout + hostScanDeadline) + ctx, cancel := context.WithTimeout(context.Background(), timeout+hostScanDeadline) connCloseChan := make(chan struct{}) go func() { select { @@ -398,7 +399,7 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { if exists { settings.NetAddress = oldEntry.Settings.NetAddress } - err = proto.WithTransportV3(ctx, settings.SiamuxAddr(), pubKey, func (t *rhpv3.Transport) error { + err = proto.WithTransportV3(ctx, settings.SiamuxAddr(), pubKey, func(t *rhpv3.Transport) error { var err error pt, err = proto.RPCPriceTable(ctx, t, func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) { return nil, nil @@ -411,7 +412,7 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { return nil }() if err != nil { - hdb.staticLog.Printf("INFO: scan of host at %v failed: %v\n", pubKey, err) + hdb.log.Info(fmt.Sprintf("scan of host at %v failed", pubKey), zap.Error(err)) } else { entry.Settings = settings entry.PriceTable = pt @@ -459,7 +460,7 @@ func (hdb *HostDB) threadedProbeHosts(scanPool <-chan modules.HostDBEntry) { // Block until hostdb has internet connectivity. 
for { hdb.mu.RLock() - online := hdb.gateway.Online() + online := len(hdb.s.Peers()) > 0 hdb.mu.RUnlock() if online { break @@ -483,7 +484,7 @@ func (hdb *HostDB) threadedProbeHosts(scanPool <-chan modules.HostDBEntry) { func (hdb *HostDB) threadedScan() { err := hdb.tg.Add() if err != nil { - hdb.staticLog.Println("ERROR: couldn't start hostdb threadgroup:", err) + hdb.log.Error("couldn't start hostdb threadgroup", zap.Error(err)) return } defer hdb.tg.Done() @@ -491,7 +492,7 @@ func (hdb *HostDB) threadedScan() { // Wait until the consensus set is synced. Only then we can be sure that // the initial scan covers the whole network. for { - if hdb.managedSynced() { + if hdb.tip == hdb.cm.Tip() { break } select { @@ -522,15 +523,13 @@ func (hdb *HostDB) threadedScan() { hdb.initialScanComplete = true err = hdb.updateState() if err != nil { - hdb.staticLog.Println("ERROR: couldn't save hostdb state:", err) + hdb.log.Error("couldn't save hostdb state", zap.Error(err)) return } // Copy the known contracts to avoid having to lock the hdb later. knownContracts := make(map[string][]contractInfo) for k, cis := range hdb.knownContracts { - for _, ci := range cis { - knownContracts[k] = append(knownContracts[k], ci) - } + knownContracts[k] = append(knownContracts[k], cis...) } hdb.mu.Unlock() @@ -554,7 +553,7 @@ func (hdb *HostDB) threadedScan() { // Figure out if the host is online or offline. host := allHosts[i] - online := len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory) - 1].Success + online := len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory)-1].Success _, known := knownContracts[host.PublicKey.String()] if known { knownHosts = append(knownHosts, host) @@ -566,7 +565,7 @@ func (hdb *HostDB) threadedScan() { } // Queue the scans for each host. 
- hdb.staticLog.Println("INFO: performing scan on", len(onlineHosts), "online hosts and", len(offlineHosts), "offline hosts and", len(knownHosts), "known hosts.") + hdb.log.Info(fmt.Sprintf("performing scan on %d online hosts, %d offline hosts, and %d known hosts", len(onlineHosts), len(offlineHosts), len(knownHosts))) hdb.mu.Lock() for _, host := range knownHosts { hdb.queueScan(host) diff --git a/modules/manager/hostdb/update.go b/modules/manager/hostdb/update.go index c9a4dc6..0e76fe3 100644 --- a/modules/manager/hostdb/update.go +++ b/modules/manager/hostdb/update.go @@ -1,11 +1,14 @@ package hostdb import ( + "fmt" "time" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) // findHostAnnouncements returns a list of the host announcements found within @@ -28,6 +31,20 @@ func findHostAnnouncements(b types.Block) (announcements []modules.HostDBEntry) announcements = append(announcements, host) } } + for _, t := range b.V2Transactions() { + for _, at := range t.Attestations { + addr, pubKey, err := modules.DecodeV2Announcement(at) + if err != nil { + continue + } + + // Add the announcement to the slice being returned. + var host modules.HostDBEntry + host.Settings.NetAddress = string(addr) + host.PublicKey = pubKey + announcements = append(announcements, host) + } + } return } @@ -37,7 +54,7 @@ func findHostAnnouncements(b types.Block) (announcements []modules.HostDBEntry) func (hdb *HostDB) insertBlockchainHost(host modules.HostDBEntry) { // Remove garbage hosts and local hosts. if err := modules.NetAddress(host.Settings.NetAddress).IsValid(); err != nil { - hdb.staticLog.Printf("WARN: host '%v' has an invalid NetAddress: %v\n", host.Settings.NetAddress, err) + hdb.log.Warn(fmt.Sprintf("host '%v' has an invalid NetAddress", host.Settings.NetAddress), zap.Error(err)) return } // Ignore all local hosts announced through the blockchain. 
@@ -56,9 +73,9 @@ func (hdb *HostDB) insertBlockchainHost(host modules.HostDBEntry) { // a zero-value FirstSeen field. oldEntry.Settings.NetAddress = host.Settings.NetAddress if oldEntry.FirstSeen == 0 { - oldEntry.FirstSeen = hdb.blockHeight + oldEntry.FirstSeen = hdb.tip.Height } - oldEntry.LastAnnouncement = hdb.blockHeight + oldEntry.LastAnnouncement = hdb.tip.Height // Resolve the host's used subnets and update the timestamp if they // changed. We only update the timestamp if resolving the ipNets was @@ -72,27 +89,27 @@ func (hdb *HostDB) insertBlockchainHost(host modules.HostDBEntry) { // Modify hosttree. err = hdb.modify(oldEntry) if err != nil { - hdb.staticLog.Println("ERROR: unable to modify host entry of host tree after a blockchain scan:", err) + hdb.log.Error("unable to modify host entry of host tree after a blockchain scan", zap.Error(err)) } // Update the database. err = hdb.updateHost(oldEntry) if err != nil { - hdb.staticLog.Println("ERROR: unable to update host entry in the database:", err) + hdb.log.Error("unable to update host entry in the database", zap.Error(err)) } } else { - host.FirstSeen = hdb.blockHeight + host.FirstSeen = hdb.tip.Height // Insert into hosttree. err := hdb.insert(host) if err != nil { - hdb.staticLog.Println("ERROR: unable to insert host entry into host tree after a blockchain scan:", err) + hdb.log.Error("unable to insert host entry into host tree after a blockchain scan", zap.Error(err)) } // Update the database. err = hdb.updateHost(host) if err != nil { - hdb.staticLog.Println("ERROR: unable to insert host entry into database:", err) + hdb.log.Error("unable to insert host entry into database", zap.Error(err)) } } @@ -100,27 +117,23 @@ func (hdb *HostDB) insertBlockchainHost(host modules.HostDBEntry) { hdb.queueScan(host) } -// ProcessConsensusChange will be called by the consensus set every time there -// is a change in the blockchain. Updates will always be called in order. 
-func (hdb *HostDB) ProcessConsensusChange(cc modules.ConsensusChange) { +// UpdateChainState applies the ChainManager updates. +func (hdb *HostDB) UpdateChainState(_ []chain.RevertUpdate, applied []chain.ApplyUpdate) error { hdb.mu.Lock() defer hdb.mu.Unlock() - // Set the block height before applying blocks to preserve previous - // behavior. - hdb.blockHeight = cc.BlockHeight + for _, cau := range applied { + hdb.tip = cau.State.Index - // Add hosts announced in blocks that were applied. - for _, block := range cc.AppliedBlocks { - for _, host := range findHostAnnouncements(block) { + for _, host := range findHostAnnouncements(cau.Block) { hdb.insertBlockchainHost(host) } } - hdb.synced = cc.Synced - hdb.lastChange = cc.ID - err := hdb.updateState() - if err != nil { - hdb.staticLog.Println("ERROR: unable to save hostdb state:", err) + if err := hdb.updateState(); err != nil { + hdb.log.Error("unable to save hostdb state", zap.Error(err)) + return err } + + return nil } diff --git a/modules/manager/manager.go b/modules/manager/manager.go index 0436326..60aac9f 100644 --- a/modules/manager/manager.go +++ b/modules/manager/manager.go @@ -14,19 +14,10 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/modules/manager/contractor" "github.com/mike76-dev/sia-satellite/modules/manager/hostdb" - "github.com/mike76-dev/sia-satellite/persist" + "go.uber.org/zap" "go.sia.tech/core/types" -) - -var ( - // Nil dependency errors. - errNilDB = errors.New("manager cannot use a nil database") - errNilMail = errors.New("manager cannot use a nil mail client") - errNilCS = errors.New("manager cannot use a nil state") - errNilTpool = errors.New("manager cannot use a nil transaction pool") - errNilWallet = errors.New("manager cannot use a nil wallet") - errNilGateway = errors.New("manager cannot use nil gateway") + "go.sia.tech/coreutils/chain" ) // A directory for storing temporary files. 
@@ -35,8 +26,6 @@ const bufferedFilesDir = "temp" // A hostContractor negotiates, revises, renews, and provides access to file // contracts. type hostContractor interface { - modules.Alerter - // SetAllowance sets the amount of money the contractor is allowed to // spend on contracts over a given time period, divided among the number // of hosts specified. Note that contractor can start forming contracts as @@ -175,10 +164,10 @@ type Manager struct { // Dependencies. db *sql.DB ms mail.MailSender - cs modules.ConsensusSet + cm *chain.Manager + s modules.Syncer hostContractor hostContractor hostDB modules.HostDB - tpool modules.TransactionPool wallet modules.Wallet // Atomic properties. @@ -206,45 +195,18 @@ type Manager struct { syncing bool // Utilities. - log *persist.Logger - mu sync.RWMutex - tg siasync.ThreadGroup - staticAlerter *modules.GenericAlerter - dir string + log *zap.Logger + mu sync.RWMutex + tg siasync.ThreadGroup + dir string } // New returns an initialized Manager. -func New(db *sql.DB, ms mail.MailSender, cs modules.ConsensusSet, g modules.Gateway, tpool modules.TransactionPool, wallet modules.Wallet, dir string, name string) (*Manager, <-chan error) { +func New(db *sql.DB, ms mail.MailSender, cm *chain.Manager, s modules.Syncer, wallet modules.Wallet, dir string, name string) (*Manager, <-chan error) { errChan := make(chan error, 1) - // Check that all the dependencies were provided. - if db == nil { - errChan <- errNilDB - return nil, errChan - } - if ms == nil { - errChan <- errNilMail - return nil, errChan - } - if cs == nil { - errChan <- errNilCS - return nil, errChan - } - if g == nil { - errChan <- errNilGateway - return nil, errChan - } - if tpool == nil { - errChan <- errNilTpool - return nil, errChan - } - if wallet == nil { - errChan <- errNilWallet - return nil, errChan - } - // Create the HostDB object. 
- hdb, errChanHDB := hostdb.New(db, g, cs, dir) + hdb, errChanHDB := hostdb.New(db, cm, s, dir) if err := modules.PeekErr(errChanHDB); err != nil { errChan <- err return nil, errChan @@ -253,22 +215,21 @@ func New(db *sql.DB, ms mail.MailSender, cs modules.ConsensusSet, g modules.Gate // Create the Manager object. m := &Manager{ name: name, - cs: cs, + cm: cm, db: db, ms: ms, hostDB: hdb, - tpool: tpool, + s: s, wallet: wallet, exchRates: make(map[string]float64), multipartUploads: make(map[types.Hash256]struct{}), - staticAlerter: modules.NewAlerter("manager"), - dir: dir, + dir: dir, } // Create the Contractor. - hc, errChanContractor := contractor.New(db, cs, m, tpool, wallet, hdb, dir) + hc, errChanContractor := contractor.New(db, cm, s, m, wallet, hdb, dir) if err := modules.PeekErr(errChanContractor); err != nil { errChan <- err return nil, errChan @@ -520,8 +481,8 @@ func (m *Manager) PriceEstimation(allowance modules.Allowance, invoicing bool) ( // Add the cost of paying the transaction fees and then double the contract // costs to account for renewing a full set of contracts. - _, feePerByte := m.tpool.FeeEstimation() - txnsFees := feePerByte.Mul64(modules.EstimatedFileContractTransactionSetSize).Mul64(allowance.Hosts).Mul64(3) + feePerByte := m.cm.RecommendedFee() + txnsFees := feePerByte.Mul64(2048).Mul64(allowance.Hosts).Mul64(3) totalContractCost = totalContractCost.Add(txnsFees) totalContractCost = totalContractCost.Mul64(2) @@ -568,7 +529,7 @@ func (m *Manager) PriceEstimation(allowance modules.Allowance, invoicing bool) ( // estimate the renter to spend all of its allowance so the Siafund fee // will be calculated on the sum of the allowance and the hosts collateral. 
totalPayout := totalCost.Add(hostCollateral) - siafundFee := modules.Tax(m.cs.Height(), totalPayout) + siafundFee := modules.Tax(m.cm.Tip().Height, totalPayout) totalContractCost = totalContractCost.Add(siafundFee) // Increase estimates by a factor of safety to account for host churn and @@ -618,7 +579,7 @@ func (m *Manager) ContractPriceEstimation(hpk types.PublicKey, endHeight uint64, return types.ZeroCurrency, 0, errors.New("host filtered") } - height := m.cs.Height() + height := m.cm.Tip().Height period := endHeight - height contractCost := host.Settings.ContractPrice downloadCost := host.Settings.DownloadBandwidthPrice @@ -636,8 +597,8 @@ func (m *Manager) ContractPriceEstimation(hpk types.PublicKey, endHeight uint64, // Add the cost of paying the transaction fees and then double the contract // cost to account for renewing. - _, feePerByte := m.tpool.FeeEstimation() - txnsFees := feePerByte.Mul64(modules.EstimatedFileContractTransactionSetSize).Mul64(3) + feePerByte := m.cm.RecommendedFee() + txnsFees := feePerByte.Mul64(2048).Mul64(3) contractCost = contractCost.Add(txnsFees) contractCost = contractCost.Mul64(2) @@ -904,7 +865,7 @@ func (m *Manager) LockSiacoins(email string, amount float64) error { } amountWithFee := amount * (1 + fee) if !ub.Subscribed && amountWithFee > ub.Balance { - m.log.Println("WARN: trying to lock more than the available balance") + m.log.Warn("trying to lock more than the available balance") amountWithFee = ub.Balance } @@ -948,7 +909,7 @@ func (m *Manager) UnlockSiacoins(email string, amount, total float64, height uin unlocked := amount burned := total - amount if total > ub.Locked { - m.log.Println("WARN: trying to unlock more than the locked balance") + m.log.Warn("trying to unlock more than the locked balance") if burned < ub.Locked { unlocked = ub.Locked - burned } else { @@ -1037,18 +998,12 @@ func (m *Manager) RetrieveSpendings(email string, currency string) ([]modules.Us // BlockHeight returns the current block height. 
func (m *Manager) BlockHeight() uint64 { - return m.cs.Height() + return m.cm.Tip().Height } // FeeEstimation returns the minimum and the maximum estimated fees for // a transaction. -func (m *Manager) FeeEstimation() (min, max types.Currency) { return m.tpool.FeeEstimation() } - -// GetWalletSeed returns the wallet seed. -func (m *Manager) GetWalletSeed() (seed modules.Seed, err error) { - seed, _, err = m.wallet.PrimarySeed() - return -} +func (m *Manager) FeeEstimation() types.Currency { return m.cm.RecommendedFee() } // DeleteMetadata deletes the renter's saved file metadata. func (m *Manager) DeleteMetadata(pk types.PublicKey) error { @@ -1247,7 +1202,7 @@ func (m *Manager) GetEmailPreferences() (string, types.Currency) { func (m *Manager) SetEmailPreferences(email string, threshold types.Currency) error { err := m.setEmailPreferences(email, threshold) if err != nil { - m.log.Println("ERROR: couldn't save email preferences:", err) + m.log.Error("couldn't save email preferences", zap.Error(err)) } return err } diff --git a/modules/manager/persist.go b/modules/manager/persist.go index 7edfd6e..b14eb07 100644 --- a/modules/manager/persist.go +++ b/modules/manager/persist.go @@ -1,13 +1,13 @@ package manager import ( - "fmt" "path/filepath" "time" - "github.com/mike76-dev/sia-satellite/internal/sync" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/persist" + "go.sia.tech/core/types" + "go.uber.org/zap" ) const ( @@ -52,29 +52,26 @@ func (m *Manager) syncDB() { // Commit the existing tx. err := m.dbTx.Commit() if err != nil { - m.log.Severe("ERROR: failed to apply database update:", err) + m.log.Error("failed to apply database update", zap.Error(err)) m.dbTx.Rollback() } // Begin a new tx. m.dbTx, err = m.db.Begin() if err != nil { - m.log.Severe("ERROR: failed to initialize a db transaction:", err) + m.log.Error("failed to initialize a db transaction", zap.Error(err)) } } // initPersist initializes the database. 
func (m *Manager) initPersist(dir string) error { // Create the logger. - var err error - m.log, err = persist.NewFileLogger(filepath.Join(dir, logFile)) + logger, closeFn, err := persist.NewFileLogger(filepath.Join(dir, logFile)) if err != nil { return modules.AddContext(err, "unable to initialize the Manager's logger") } + m.log = logger m.tg.AfterStop(func() { - err := m.log.Close() - if err != nil { - fmt.Println("Unable to close the Manager's logger:", err) - } + closeFn() }) // Load email preferences. @@ -112,7 +109,7 @@ func (m *Manager) initPersist(dir string) error { err := m.dbTx.Commit() m.mu.Unlock() if err != nil { - m.log.Println("ERROR: unable to close transaction properly during shutdown:", err) + m.log.Error("unable to close transaction properly during shutdown", zap.Error(err)) } }) @@ -143,18 +140,34 @@ func (m *Manager) initPersist(dir string) error { // Subscribe to the consensus set using the most recent consensus change. go func() { - err := m.cs.ConsensusSetSubscribe(m, modules.ConsensusChangeRecent, m.tg.StopChan()) - if modules.ContainsError(err, sync.ErrStopped) { - return - } + err := m.sync(m.cm.Tip()) if err != nil { - m.log.Critical(err) + m.log.Error("failed to subscribe", zap.Error(err)) return } }() - m.tg.OnStop(func() { - m.cs.Unsubscribe(m) - }) return nil } + +func (m *Manager) sync(index types.ChainIndex) error { + for index != m.cm.Tip() { + select { + case <-m.tg.StopChan(): + return nil + default: + } + crus, caus, err := m.cm.UpdatesSince(index, 1000) + if err != nil { + m.log.Error("failed to subscribe to chain manager", zap.Error(err)) + return err + } else if err := m.UpdateChainState(crus, caus); err != nil { + m.log.Error("failed to update chain state", zap.Error(err)) + return err + } + if len(caus) > 0 { + index = caus[len(caus)-1].State.Index + } + } + return nil +} diff --git a/modules/manager/proto/proto.go b/modules/manager/proto/proto.go index 1eb4a05..f743ba6 100644 --- a/modules/manager/proto/proto.go +++ 
b/modules/manager/proto/proto.go @@ -6,6 +6,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" + "go.sia.tech/core/consensus" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" @@ -13,7 +14,7 @@ import ( // transactionSigner is the minimal interface for modules.Wallet. type transactionSigner interface { - Sign(*types.Transaction, []types.Hash256, types.CoveredFields) error + Sign(cs consensus.State, txn *types.Transaction, toSign []types.Hash256) error } // HostSettings uses the Settings RPC to retrieve the host's settings. diff --git a/modules/manager/proto/rhpv3.go b/modules/manager/proto/rhpv3.go index 9c42cff..ef7f34d 100644 --- a/modules/manager/proto/rhpv3.go +++ b/modules/manager/proto/rhpv3.go @@ -12,6 +12,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" + "go.sia.tech/core/consensus" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" @@ -90,7 +91,7 @@ func processPayment(s *rhpv3.Stream, payment rhpv3.PaymentMethod) error { } // RPCRenewContract negotiates a contract renewal with the host. 
-func RPCRenewContract(ctx context.Context, t *rhpv3.Transport, renterKey types.PrivateKey, rev types.FileContractRevision, txnSet []types.Transaction, toSign []types.Hash256, ts transactionSigner) (_ rhpv2.ContractRevision, _ []types.Transaction, err error) { +func RPCRenewContract(ctx context.Context, t *rhpv3.Transport, renterKey types.PrivateKey, rev types.FileContractRevision, txnSet []types.Transaction, toSign []types.Hash256, ts transactionSigner, cs consensus.State) (_ rhpv2.ContractRevision, _ []types.Transaction, err error) { s := t.DialStream() defer s.Close() s.SetDeadline(time.Now().Add(5 * time.Minute)) @@ -159,13 +160,8 @@ func RPCRenewContract(ctx context.Context, t *rhpv3.Transport, renterKey types.P } txn.Signatures = []types.TransactionSignature{finalRevRenterSig, finalRevHostSig} - // Sign the inputs we funded the txn with and cover the whole txn including - // the existing signatures. - cf := types.CoveredFields{ - WholeTransaction: true, - Signatures: []uint64{0, 1}, - } - if err := ts.Sign(&txn, toSign, cf); err != nil { + // Sign the inputs we funded the txn with. + if err := ts.Sign(cs, &txn, toSign); err != nil { return rhpv2.ContractRevision{}, nil, modules.AddContext(err, "failed to sign transaction") } @@ -231,7 +227,7 @@ func RPCRenewContract(ctx context.Context, t *rhpv3.Transport, renterKey types.P // RPCTrustlessRenewContract negotiates a contract renewal with the host // using the new Renter-Satellite protocol. 
-func RPCTrustlessRenewContract(ctx context.Context, ss *modules.RPCSession, t *rhpv3.Transport, txnSet []types.Transaction, toSign []types.Hash256, ts transactionSigner) (_ rhpv2.ContractRevision, _ []types.Transaction, err error) { +func RPCTrustlessRenewContract(ctx context.Context, ss *modules.RPCSession, t *rhpv3.Transport, txnSet []types.Transaction, toSign []types.Hash256, ts transactionSigner, cs consensus.State) (_ rhpv2.ContractRevision, _ []types.Transaction, err error) { s := t.DialStream() defer s.Close() s.SetDeadline(time.Now().Add(5 * time.Minute)) @@ -313,11 +309,7 @@ func RPCTrustlessRenewContract(ctx context.Context, ss *modules.RPCSession, t *r // Add the revision signatures to the transaction set and sign it. txn.Signatures = []types.TransactionSignature{finalRevRenterSig, finalRevHostSig} - cf := types.CoveredFields{ - WholeTransaction: true, - Signatures: []uint64{0, 1}, - } - if err := ts.Sign(&txn, toSign, cf); err != nil { + if err := ts.Sign(cs, &txn, toSign); err != nil { return rhpv2.ContractRevision{}, nil, modules.AddContext(err, "failed to sign transaction") } diff --git a/modules/manager/stripe.go b/modules/manager/stripe.go index a34673f..94926c4 100644 --- a/modules/manager/stripe.go +++ b/modules/manager/stripe.go @@ -1,6 +1,7 @@ package manager import ( + "fmt" "strings" "github.com/mike76-dev/sia-satellite/modules" @@ -10,6 +11,7 @@ import ( "github.com/stripe/stripe-go/v75/invoiceitem" "github.com/stripe/stripe-go/v75/price" "github.com/stripe/stripe-go/v75/product" + "go.uber.org/zap" ) // threadedSettleAccounts tries to settle the outstanding balances. @@ -24,7 +26,7 @@ func (m *Manager) threadedSettleAccounts() { // Get the account balance. 
ub, err := m.GetBalance(renter.Email) if err != nil { - m.log.Printf("ERROR: couldn't retrieve account balance of %v: %v\n", renter.Email, err) + m.log.Error(fmt.Sprintf("couldn't retrieve account balance of %v", renter.Email), zap.Error(err)) continue } @@ -41,14 +43,14 @@ func (m *Manager) threadedSettleAccounts() { // Sanity check: ub.StripeID shouldn't be empty. if ub.StripeID == "" { - m.log.Println("ERROR: Stripe ID not found at", renter.Email) + m.log.Error(fmt.Sprintf("Stripe ID not found at %s", renter.Email)) continue } // Issue an invoice. err = m.managedCreateInvoice(ub.StripeID, ub.Currency, -ub.Balance*ub.SCRate) if err != nil { - m.log.Printf("ERROR: couldn't create invoice for %v: %v\n", renter.Email, err) + m.log.Error(fmt.Sprintf("couldn't create invoice for %v", renter.Email), zap.Error(err)) } } } diff --git a/modules/manager/update.go b/modules/manager/update.go index 8aeb091..bd421ed 100644 --- a/modules/manager/update.go +++ b/modules/manager/update.go @@ -10,8 +10,10 @@ import ( "github.com/mike76-dev/sia-satellite/external" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) const ( @@ -72,7 +74,7 @@ func (m *Manager) calculateAverages() { hosts, err := m.ActiveHosts() if err != nil { - m.log.Println("ERROR: could not fetch active hosts", err) + m.log.Error("could not fetch active hosts", zap.Error(err)) return } @@ -117,7 +119,7 @@ func (m *Manager) calculateAverages() { // Save to disk. 
if err := dbPutAverages(m.dbTx, m.hostAverages); err != nil { - m.log.Println("ERROR: couldn't save network averages:", err) + m.log.Error("couldn't save network averages", zap.Error(err)) } } @@ -144,7 +146,7 @@ func (m *Manager) threadedCalculateAverages() { func (m *Manager) fetchExchangeRates() { data, err := external.FetchSCRates() if err != nil { - m.log.Println("ERROR:", err) + m.log.Error("couldn't fetch exchange rates", zap.Error(err)) return } @@ -233,9 +235,8 @@ const reportTemplate = ` ` -// ProcessConsensusChange gets called to inform Manager about the -// changes in the consensus set. -func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { +// UpdateChainState applies the updates from the ChainManager. +func (m *Manager) UpdateChainState(_ []chain.RevertUpdate, applied []chain.ApplyUpdate) (err error) { // Define a helper. convertSize := func(size uint64) string { if size < 1024 { @@ -255,8 +256,8 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { return fmt.Sprintf("%.2f %s", s, sizes[i]) } - // Process the applied blocks till the first found in the following month. - for _, block := range cc.AppliedBlocks { + for _, cau := range applied { + block := cau.Block m.mu.Lock() m.lastBlockTimestamp = block.Timestamp currentMonth := m.currentMonth.Timestamp.Month() @@ -267,13 +268,13 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { m.mu.Lock() m.prevMonth = m.currentMonth m.currentMonth = blockTimestamp{ - BlockHeight: cc.BlockHeight, + BlockHeight: cau.State.Index.Height, Timestamp: block.Timestamp, } err := dbPutBlockTimestamps(m.dbTx, m.currentMonth, m.prevMonth) m.mu.Unlock() if err != nil { - m.log.Println("ERROR: couldn't save block timestamps", err) + m.log.Error("couldn't save block timestamps", zap.Error(err)) } // Calculate the monthly spendings of each renter. 
@@ -283,12 +284,12 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { for _, renter := range renters { ub, err := m.GetBalance(renter.Email) if err != nil { - m.log.Println("ERROR: couldn't retrieve balance:", err) + m.log.Error("couldn't retrieve balance", zap.Error(err)) continue } us, err := m.GetSpendings(renter.Email, int(currentMonth), currentYear) if err != nil { - m.log.Println("ERROR: couldn't retrieve renter spendings:", err) + m.log.Error("couldn't retrieve renter spendings", zap.Error(err)) continue } formed += us.Formed @@ -323,7 +324,7 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { } count, data, err := m.numSlabs(renter.PublicKey) if err != nil { - m.log.Println("ERROR: couldn't retrieve slab count:", err) + m.log.Error("couldn't retrieve slab count", zap.Error(err)) continue } var storageFee, dataFee float64 @@ -346,11 +347,11 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { us.Overhead += partialCost err = m.UpdateSpendings(renter.Email, us, int(currentMonth), currentYear) if err != nil { - m.log.Println("ERROR: couldn't update spendings:", err) + m.log.Error("couldn't update spendings", zap.Error(err)) } if ub.OnHold > 0 && ub.OnHold < uint64(time.Now().Unix()-int64(modules.OnHoldThreshold.Seconds())) { // Account on hold, delete the file metadata. - m.log.Println("WARN: account on hold, deleting stored metadata") + m.log.Warn("account on hold, deleting stored metadata") m.DeleteBufferedFiles(renter.PublicKey) m.DeleteMultipartUploads(renter.PublicKey) m.DeleteMetadata(renter.PublicKey) @@ -359,7 +360,7 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { // Deduct from the account balance. if !ub.Subscribed && ub.Balance < storageCost+partialCost { // Insufficient balance, delete the file metadata. 
- m.log.Println("WARN: insufficient account balance, deleting stored metadata") + m.log.Warn("insufficient account balance, deleting stored metadata") m.DeleteBufferedFiles(renter.PublicKey) m.DeleteMultipartUploads(renter.PublicKey) m.DeleteMetadata(renter.PublicKey) @@ -367,7 +368,7 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { } ub.Balance -= (storageCost + partialCost) if err := m.UpdateBalance(renter.Email, ub); err != nil { - m.log.Println("ERROR: couldn't update balance", err) + m.log.Error("couldn't update balance", zap.Error(err)) } } @@ -401,7 +402,7 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { t := template.New("report") t, err := t.Parse(reportTemplate) if err != nil { - m.log.Printf("ERROR: unable to parse HTML template: %v\n", err) + m.log.Error("unable to parse HTML template", zap.Error(err)) return } var b bytes.Buffer @@ -428,26 +429,27 @@ func (m *Manager) ProcessConsensusChange(cc modules.ConsensusChange) { }) err = m.ms.SendMail("Sia Satellite", m.email, "Your Monthly Report", &b) if err != nil { - m.log.Println("ERROR: unable to send a monthly report:", err) + m.log.Error("unable to send monthly report", zap.Error(err)) } }() // Delete old spendings records from the database. err = m.deleteOldSpendings() if err != nil { - m.log.Println("ERROR: couldn't delete old spendings:", err) + m.log.Error("couldn't delete old spendings", zap.Error(err)) } // Spin a thread to invoice the subscribed accounts. go m.threadedSettleAccounts() m.syncDB() - break } } // Send a warning email if the wallet balance becomes low. 
m.sendWarning() + + return nil } // outOfSyncTemplate contains the text send by email when the last @@ -498,18 +500,18 @@ func (m *Manager) sendOutOfSyncWarning() { t := template.New("warning") t, err := t.Parse(outOfSyncTemplate) if err != nil { - m.log.Printf("ERROR: unable to parse HTML template: %v\n", err) + m.log.Error("unable to parse HTML template", zap.Error(err)) return } var b bytes.Buffer t.Execute(&b, warning{ Name: m.name, - Height: m.cs.Height(), + Height: m.cm.Tip().Height, Since: fmt.Sprintf("%dh%dm", hours, minutes), }) err = m.ms.SendMail("Sia Satellite", m.email, "Out Of Sync Warning", &b) if err != nil { - m.log.Println("ERROR: unable to send a warning:", err) + m.log.Error("unable to send warning", zap.Error(err)) return } diff --git a/modules/modules.go b/modules/modules.go index 875cda3..3488819 100644 --- a/modules/modules.go +++ b/modules/modules.go @@ -1,8 +1,6 @@ package modules import ( - "crypto/ed25519" - "go.sia.tech/core/types" "lukechampine.com/frand" @@ -18,16 +16,15 @@ var ( MaxSectorAccessPrice = types.HastingsPerSiacoin.Div64(1e5) ) -// DeriveRenterSeed derives a seed to be used by the renter for accessing the +// DeriveRenterKey derives a key to be used by the renter for accessing the // file contracts. -// NOTE: The seed returned by this function should be wiped once it's no longer +// NOTE: The key returned by this function should be wiped once it's no longer // in use. 
-func DeriveRenterSeed(walletSeed Seed, email string) []byte { - renterSeed := make([]byte, ed25519.SeedSize) - rs := types.HashBytes(append(walletSeed[:], []byte(email)...)) +func DeriveRenterKey(masterKey types.PrivateKey, email string) []byte { + rs := types.HashBytes(append(masterKey, []byte(email)...)) defer frand.Read(rs[:]) - copy(renterSeed, rs[:]) - return renterSeed + pk := types.NewPrivateKeyFromSeed(rs[:]) + return pk } // DeriveEphemeralKey derives a secret key to be used by the renter for the diff --git a/modules/portal.go b/modules/portal.go index 90f183e..652b893 100644 --- a/modules/portal.go +++ b/modules/portal.go @@ -14,8 +14,6 @@ type CreditData struct { // Portal implements the portal server. type Portal interface { - Alerter - // Close safely shuts down the portal. Close() error diff --git a/modules/portal/alert.go b/modules/portal/alert.go deleted file mode 100644 index 35c2f2b..0000000 --- a/modules/portal/alert.go +++ /dev/null @@ -1,8 +0,0 @@ -package portal - -import "github.com/mike76-dev/sia-satellite/modules" - -// Alerts implements the modules.Alerter interface for the portal. -func (p *Portal) Alerts() (crit, err, warn, info []modules.Alert) { - return p.staticAlerter.Alerts() -} diff --git a/modules/portal/auth.go b/modules/portal/auth.go index 4209aef..13ca411 100644 --- a/modules/portal/auth.go +++ b/modules/portal/auth.go @@ -10,6 +10,7 @@ import ( "time" "github.com/julienschmidt/httprouter" + "go.uber.org/zap" ) const ( @@ -108,7 +109,7 @@ func (api *portalAPI) loginHandlerPOST(w http.ResponseWriter, req *http.Request, // Check if the user account exists. 
exists, cErr := api.portal.userExists(email) if cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -138,7 +139,7 @@ func (api *portalAPI) loginHandlerPOST(w http.ResponseWriter, req *http.Request, // Check if the account is verified and the password is correct. verified, passwordOK, cErr := api.portal.isVerified(email, password) if cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -171,7 +172,7 @@ func (api *portalAPI) loginHandlerPOST(w http.ResponseWriter, req *http.Request, t := time.Now().Add(7 * 24 * time.Hour) token, tErr := api.portal.generateToken(cookiePrefix, email, t) if tErr != nil { - api.portal.log.Printf("ERROR: error generating token: %v\n", tErr) + api.portal.log.Error("error generating token", zap.Error(tErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -223,7 +224,7 @@ func (api *portalAPI) registerHandlerPOST(w http.ResponseWriter, req *http.Reque // Check if the email address is already registered. exists, cErr := api.portal.userExists(email) if cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -235,7 +236,7 @@ func (api *portalAPI) registerHandlerPOST(w http.ResponseWriter, req *http.Reque // Check if the account is verified already. 
verified, passwordOK, cErr := api.portal.isVerified(email, password) if cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -277,7 +278,7 @@ func (api *portalAPI) registerHandlerPOST(w http.ResponseWriter, req *http.Reque // Create a new account. if cErr := api.portal.updateAccount(email, password, false); cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -310,7 +311,7 @@ func (api *portalAPI) sendVerificationLinkByMail(w http.ResponseWriter, req *htt // Generate a verification link. token, err := api.portal.generateToken(verifyPrefix, email, time.Now().Add(24*time.Hour)) if err != nil { - api.portal.log.Printf("ERROR: error generating token: %v\n", err) + api.portal.log.Error("error generating token", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -320,7 +321,7 @@ func (api *portalAPI) sendVerificationLinkByMail(w http.ResponseWriter, req *htt } path := req.Header["Referer"] if len(path) == 0 { - api.portal.log.Printf("ERROR: unable to fetch referer URL") + api.portal.log.Error("unable to fetch referer URL") writeError(w, Error{ Code: httpErrorInternal, @@ -337,7 +338,7 @@ func (api *portalAPI) sendVerificationLinkByMail(w http.ResponseWriter, req *htt t := template.New("verify") t, err = t.Parse(verifyTemplate) if err != nil { - api.portal.log.Printf("ERROR: unable to parse HTML template: %v\n", err) + api.portal.log.Error("unable to parse HTML template", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -351,7 +352,7 @@ func (api *portalAPI) sendVerificationLinkByMail(w http.ResponseWriter, req *htt // Send verification link by email. 
err = api.portal.ms.SendMail("Sia Satellite", email, "Action Required", &b) if err != nil { - api.portal.log.Printf("ERROR: unable to send verification link: %v\n", err) + api.portal.log.Error("unable to send verification link", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -369,7 +370,7 @@ func (api *portalAPI) sendPasswordResetLinkByMail(w http.ResponseWriter, req *ht // Generate a password reset link. token, err := api.portal.generateToken(resetPrefix, email, time.Now().Add(time.Hour)) if err != nil { - api.portal.log.Printf("ERROR: error generating token: %v\n", err) + api.portal.log.Error("error generating token", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -379,7 +380,7 @@ func (api *portalAPI) sendPasswordResetLinkByMail(w http.ResponseWriter, req *ht } path := req.Header["Referer"] if len(path) == 0 { - api.portal.log.Printf("ERROR: unable to fetch referer URL") + api.portal.log.Error("unable to fetch referer URL") writeError(w, Error{ Code: httpErrorInternal, @@ -396,7 +397,7 @@ func (api *portalAPI) sendPasswordResetLinkByMail(w http.ResponseWriter, req *ht t := template.New("reset") t, err = t.Parse(resetTemplate) if err != nil { - api.portal.log.Printf("ERROR: unable to parse HTML template: %v\n", err) + api.portal.log.Error("unable to parse HTML template", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -410,7 +411,7 @@ func (api *portalAPI) sendPasswordResetLinkByMail(w http.ResponseWriter, req *ht // Send password reset link by email. err = api.portal.ms.SendMail("Sia Satellite", email, "Reset Your Password", &b) if err != nil { - api.portal.log.Printf("ERROR: unable to send password reset link: %v\n", err) + api.portal.log.Error("unable to send password reset link", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -488,7 +489,7 @@ func (api *portalAPI) authHandlerGET(w http.ResponseWriter, req *http.Request, _ // Check if any promo action is running. 
err = api.portal.creditAccount(email) if err != nil { - api.portal.log.Printf("ERROR: unable to credit user account: %v\n", err) + api.portal.log.Error("unable to credit user account", zap.Error(err)) } case resetPrefix: @@ -505,7 +506,7 @@ func (api *portalAPI) authHandlerGET(w http.ResponseWriter, req *http.Request, _ // Check if the email address is already registered. exists, cErr := api.portal.userExists(email) if cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -530,7 +531,7 @@ func (api *portalAPI) authHandlerGET(w http.ResponseWriter, req *http.Request, _ // Set the expiration the same as of the password reset token. ct, err := api.portal.generateToken(changePrefix, email, expires) if err != nil { - api.portal.log.Printf("ERROR: error generating token: %v\n", err) + api.portal.log.Error("error generating token", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -588,7 +589,7 @@ func (api *portalAPI) resetHandlerPOST(w http.ResponseWriter, req *http.Request, // Check if such account exists. exists, cErr := api.portal.userExists(data.Email) if cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -686,7 +687,7 @@ func (api *portalAPI) changeHandlerPOST(w http.ResponseWriter, req *http.Request // Check if the user account exists. exists, cErr := api.portal.userExists(email) if cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -709,7 +710,7 @@ func (api *portalAPI) changeHandlerPOST(w http.ResponseWriter, req *http.Request // verified in any case, because the email address has // been verified. 
if cErr := api.portal.updateAccount(email, password, true); cErr != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", cErr) + api.portal.log.Error("error querying database", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -733,7 +734,7 @@ func (api *portalAPI) deleteHandlerGET(w http.ResponseWriter, req *http.Request, // Check if the account is allowed to be deleted. ub, err := api.portal.manager.GetBalance(email) if err != nil { - api.portal.log.Printf("ERROR: couldn't get account balance: %v\n", err) + api.portal.log.Error("couldn't get account balance", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -753,7 +754,7 @@ func (api *portalAPI) deleteHandlerGET(w http.ResponseWriter, req *http.Request, // Remove the account from the database. if err = api.portal.deleteAccount(email); err != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", err) + api.portal.log.Error("error querying database", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -795,7 +796,7 @@ func (api *portalAPI) verifyCookie(w http.ResponseWriter, token string) (email s // Check if the user account exists. exists, err := api.portal.userExists(email) if err != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", err) + api.portal.log.Error("error querying database", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, diff --git a/modules/portal/dashboard.go b/modules/portal/dashboard.go index af14891..8900765 100644 --- a/modules/portal/dashboard.go +++ b/modules/portal/dashboard.go @@ -12,6 +12,7 @@ import ( "github.com/julienschmidt/httprouter" "github.com/mike76-dev/sia-satellite/internal/build" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" @@ -135,7 +136,7 @@ func (api *portalAPI) balanceHandlerGET(w http.ResponseWriter, req *http.Request // Retrieve the balance information from the database. 
var ub modules.UserBalance if ub, err = api.portal.manager.GetBalance(email); err != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", err) + api.portal.log.Error("error querying database", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -232,7 +233,7 @@ func (api *portalAPI) hostsHandlerPOST(w http.ResponseWriter, req *http.Request, // Sanity check. if scRate == 0 { - api.portal.log.Println("ERROR: zero exchange rate") + api.portal.log.Error("zero exchange rate") writeError(w, Error{ Code: httpErrorInternal, @@ -270,7 +271,7 @@ func (api *portalAPI) hostsHandlerPOST(w http.ResponseWriter, req *http.Request, // Pick random hosts. hosts, err := api.portal.manager.RandomHosts(a.Hosts, a) if err != nil { - api.portal.log.Println("ERROR: could not get random hosts", err) + api.portal.log.Error("could not get random hosts", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -325,8 +326,8 @@ func (api *portalAPI) hostsHandlerPOST(w http.ResponseWriter, req *http.Request, // Add the cost of paying the transaction fees and then double the contract // costs to account for renewing a full set of contracts. - _, feePerByte := api.portal.manager.FeeEstimation() - txnsFees := feePerByte.Mul64(modules.EstimatedFileContractTransactionSetSize).Mul64(uint64(a.Hosts)) + feePerByte := api.portal.manager.FeeEstimation() + txnsFees := feePerByte.Mul64(2048).Mul64(uint64(a.Hosts)) totalContractCost = totalContractCost.Add(txnsFees) totalContractCost = totalContractCost.Mul64(2) @@ -373,7 +374,7 @@ func (api *portalAPI) paymentsHandlerGET(w http.ResponseWriter, req *http.Reques // Retrieve the payment history. 
var ups []userPayment if ups, err = api.portal.getPayments(email); err != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", err) + api.portal.log.Error("error querying database", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -397,7 +398,7 @@ func (api *portalAPI) seedHandlerGET(w http.ResponseWriter, req *http.Request, _ // Retrieve the account balance. var ub modules.UserBalance if ub, err = api.portal.manager.GetBalance(email); err != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", err) + api.portal.log.Error("error querying database", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -417,17 +418,7 @@ func (api *portalAPI) seedHandlerGET(w http.ResponseWriter, req *http.Request, _ } // Generate the seed and wipe it after use. - walletSeed, err := api.portal.manager.GetWalletSeed() - if err != nil { - api.portal.log.Printf("ERROR: error retrieving wallet seed: %v\n", err) - writeError(w, - Error{ - Code: httpErrorInternal, - Message: "internal error", - }, http.StatusInternalServerError) - return - } - renterSeed := modules.DeriveRenterSeed(walletSeed, email) + renterSeed := api.portal.w.RenterSeed(email) defer frand.Read(renterSeed) w.Header().Set("Renter-Seed", hex.EncodeToString(renterSeed)) @@ -439,7 +430,7 @@ func (api *portalAPI) keyHandlerGET(w http.ResponseWriter, _ *http.Request, _ ht key := api.portal.provider.PublicKey() satPort, err := strconv.ParseInt(strings.TrimPrefix(api.portal.satAddr, ":"), 10, 32) if err != nil { - api.portal.log.Println("ERROR: couldn't fetch satellite port:", err) + api.portal.log.Error("couldn't fetch satellite port", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -449,7 +440,7 @@ func (api *portalAPI) keyHandlerGET(w http.ResponseWriter, _ *http.Request, _ ht } muxPort, err := strconv.ParseInt(strings.TrimPrefix(api.portal.muxAddr, ":"), 10, 32) if err != nil { - api.portal.log.Println("ERROR: couldn't fetch mux port:", err) + 
api.portal.log.Error("couldn't fetch mux port", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -559,7 +550,7 @@ func (api *portalAPI) getContracts(renter modules.Renter, current, old bool) []r // A contract can either be active, passive, refreshed, or disabled. statusErr := active && passive && refreshed || active && refreshed || active && passive || passive && refreshed if statusErr { - api.portal.log.Println("CRITICAL: Contract has multiple status types, this should never happen") + api.portal.log.Error("contract has multiple status types, this should never happen") } else if active { contract.Status = "active" rc = append(rc, contract) @@ -672,7 +663,7 @@ func (api *portalAPI) spendingsHandlerGET(w http.ResponseWriter, req *http.Reque } var us []modules.UserSpendings if us, err = api.portal.manager.RetrieveSpendings(email, currency); err != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", err) + api.portal.log.Error("error querying database", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -728,7 +719,7 @@ func (api *portalAPI) filesHandlerGET(w http.ResponseWriter, req *http.Request, // Retrieve the file information. sf, err := api.portal.getFiles(renter.PublicKey) if err != nil { - api.portal.log.Printf("ERROR: couldn't retrieve files: %v\n", err) + api.portal.log.Error("couldn't retrieve files", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -740,7 +731,7 @@ func (api *portalAPI) filesHandlerGET(w http.ResponseWriter, req *http.Request, // Retrieve the file information. bf, err := api.portal.getBufferedFiles(renter.PublicKey) if err != nil { - api.portal.log.Printf("ERROR: couldn't retrieve buffered files: %v\n", err) + api.portal.log.Error("couldn't retrieve buffered files", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -787,7 +778,7 @@ func (api *portalAPI) filesHandlerPOST(w http.ResponseWriter, req *http.Request, // Delete the files. 
err = api.portal.deleteFiles(renter.PublicKey, data.Files) if err != nil { - api.portal.log.Printf("ERROR: couldn't delete files: %v\n", err) + api.portal.log.Error("couldn't delete files", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -850,7 +841,7 @@ func (api *portalAPI) settingsHandlerPOST(w http.ResponseWriter, req *http.Reque err = api.portal.manager.UpdateRenterSettings(renter.PublicKey, settings, sk, ak) if err != nil { - api.portal.log.Printf("ERROR: couldn't update settings: %v\n", err) + api.portal.log.Error("couldn't update settings", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -916,7 +907,7 @@ func (api *portalAPI) fileHandlerGET(w http.ResponseWriter, req *http.Request, _ err = api.portal.manager.DownloadObject(w, renter.PublicKey, bucket, path) if err != nil { - api.portal.log.Printf("ERROR: couldn't download file: %v\n", err) + api.portal.log.Error("couldn't download file", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -938,7 +929,7 @@ func (api *portalAPI) addressHandlerGET(w http.ResponseWriter, req *http.Request // Get the wallet address. address, err := api.portal.getSiacoinAddress(email) if err != nil { - api.portal.log.Printf("ERROR: error getting payment address: %v\n", err) + api.portal.log.Error("error getting payment address", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -964,7 +955,7 @@ func (api *portalAPI) planHandlerPOST(w http.ResponseWriter, req *http.Request, // Check if the change is allowed. 
ub, err := api.portal.manager.GetBalance(email) if err != nil { - api.portal.log.Printf("ERROR: error getting account balance: %v\n", err) + api.portal.log.Error("error getting account balance", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -985,7 +976,7 @@ func (api *portalAPI) planHandlerPOST(w http.ResponseWriter, req *http.Request, if !ub.Subscribed { dpm, err := isDefaultPaymentMethodSet(ub.StripeID) if err != nil { - api.portal.log.Println("ERROR: wrong Stripe ID:", email, ub.StripeID) + api.portal.log.Error("wrong Stripe ID", zap.String("email", email), zap.String("stripeID", ub.StripeID), zap.Error(err)) writeError(w, Error{ Code: httpErrorBadRequest, @@ -1004,7 +995,7 @@ func (api *portalAPI) planHandlerPOST(w http.ResponseWriter, req *http.Request, // Change the payment plan. err = api.portal.changePaymentPlan(email) if err != nil { - api.portal.log.Printf("ERROR: error changing payment plan: %v\n", err) + api.portal.log.Error("error changing payment plan", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -1026,7 +1017,7 @@ func (api *portalAPI) announcementHandlerGET(w http.ResponseWriter, req *http.Re // Get the current announcement. 
text, _, err := api.portal.GetAnnouncement() if err != nil { - api.portal.log.Printf("ERROR: error getting announcement: %v\n", err) + api.portal.log.Error("error getting announcement", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, diff --git a/modules/portal/database.go b/modules/portal/database.go index 97d7859..30517c8 100644 --- a/modules/portal/database.go +++ b/modules/portal/database.go @@ -15,6 +15,7 @@ import ( "time" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" @@ -123,7 +124,7 @@ func (p *Portal) threadedPruneUnverifiedAccounts() { now := time.Now().Unix() _, err = p.db.Exec("DELETE FROM pt_accounts WHERE verified = FALSE AND time < ?", now-pruneUnverifiedAccountsThreshold.Milliseconds()/1000) if err != nil { - p.log.Printf("ERROR: error querying database: %v\n", err) + p.log.Error("error querying database", zap.Error(err)) } }() } @@ -296,12 +297,7 @@ func (p *Portal) addPayment(id string, amount float64, currency string, def bool } if c == 0 { // New renter, need to create a new record. - seed, err := p.manager.GetWalletSeed() - defer frand.Read(seed[:]) - if err != nil { - return err - } - renterSeed := modules.DeriveRenterSeed(seed, email) + renterSeed := p.w.RenterSeed(email) defer frand.Read(renterSeed) pk := types.NewPrivateKeyFromSeed(renterSeed).PublicKey() if err = p.createNewRenter(email, pk); err != nil { @@ -425,13 +421,7 @@ func (p *Portal) confirmSiacoinPayment(txid types.TransactionID) error { } if count == 0 { // New renter, need to create a new record. 
- seed, err := p.manager.GetWalletSeed() - defer frand.Read(seed[:]) - if err != nil { - tx.Rollback() - return err - } - renterSeed := modules.DeriveRenterSeed(seed, email) + renterSeed := p.w.RenterSeed(email) defer frand.Read(renterSeed) pk := types.NewPrivateKeyFromSeed(renterSeed).PublicKey() if err = p.createNewRenter(email, pk); err != nil { @@ -520,13 +510,13 @@ func (p *Portal) verifyNonce(email string, nonce []byte) (bool, error) { func (p *Portal) saveStats() error { tx, err := p.db.Begin() if err != nil { - p.log.Println("ERROR: couldn't save auth stats:", err) + p.log.Error("couldn't save auth stats", zap.Error(err)) return err } _, err = tx.Exec("DELETE FROM pt_stats") if err != nil { - p.log.Println("ERROR: couldn't clear auth stats:", err) + p.log.Error("couldn't clear auth stats", zap.Error(err)) tx.Rollback() return err } @@ -539,7 +529,7 @@ func (p *Portal) saveStats() error { VALUES (?, ?, ?, ?, ?, ?, ?) `, ip, entry.FailedLogins.LastAttempt, entry.FailedLogins.Count, entry.Verifications.LastAttempt, entry.Verifications.Count, entry.PasswordResets.LastAttempt, entry.PasswordResets.Count) if err != nil { - p.log.Println("ERROR: couldn't save auth stats:", err) + p.log.Error("couldn't save auth stats", zap.Error(err)) tx.Rollback() return err } @@ -556,7 +546,7 @@ func (p *Portal) loadStats() error { FROM pt_stats `) if err != nil { - p.log.Println("ERROR: couldn't load auth stats:", err) + p.log.Error("couldn't load auth stats", zap.Error(err)) return err } defer rows.Close() @@ -565,7 +555,7 @@ func (p *Portal) loadStats() error { var ip string var ll, lc, vl, vc, rl, rc int64 if err := rows.Scan(&ip, &ll, &lc, &vl, &vc, &rl, &rc); err != nil { - p.log.Println("ERROR: couldn't load auth stats:", err) + p.log.Error("couldn't load auth stats", zap.Error(err)) return err } p.authStats[ip] = authenticationStats{ @@ -595,7 +585,7 @@ func (p *Portal) saveCredits() error { VALUES (1, ?, ?) 
`, p.credits.Amount, p.credits.Remaining) if err != nil { - p.log.Println("ERROR: couldn't save credit data:", err) + p.log.Error("couldn't save credit data", zap.Error(err)) return err } @@ -615,7 +605,7 @@ func (p *Portal) loadCredits() error { return nil } if err != nil { - p.log.Println("ERROR: couldn't load credit data:", err) + p.log.Error("couldn't load credit data", zap.Error(err)) return err } @@ -952,7 +942,7 @@ func (p *Portal) managedCheckOnHoldAccounts() { WHERE on_hold > 0 AND on_hold < ? `, uint64(time.Now().Unix()-int64(modules.OnHoldThreshold.Seconds()))) if err != nil { - p.log.Println("ERROR: couldn't update account:", err) + p.log.Error("couldn't update account", zap.Error(err)) } } @@ -987,6 +977,6 @@ func (p *Portal) managedCheckAnnouncement() { WHERE expires > 0 AND expires <= ? `, timestamp) if err != nil { - p.log.Println("ERROR: unable to expire announcement:", err) + p.log.Error("unable to expire announcement", zap.Error(err)) } } diff --git a/modules/portal/google.go b/modules/portal/google.go index 1cc00dd..3b2d376 100644 --- a/modules/portal/google.go +++ b/modules/portal/google.go @@ -10,6 +10,7 @@ import ( jwt "github.com/golang-jwt/jwt/v5" "github.com/julienschmidt/httprouter" "github.com/mike76-dev/sia-satellite/external" + "go.uber.org/zap" ) // Google glient ID. @@ -30,7 +31,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, // Retrieve the action type. action := req.FormValue("action") if action != "signup" && action != "login" { - api.portal.log.Println("ERROR: wrong action type") + api.portal.log.Error("wrong action type") writeError(w, Error{ Code: httpErrorBadRequest, @@ -54,7 +55,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, // Verify client ID. 
if data.ClientID != googleClientID { - api.portal.log.Println("ERROR: wrong client ID") + api.portal.log.Error("wrong client ID") writeError(w, Error{ Code: httpErrorWrongCredentials, @@ -77,7 +78,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, return key, nil }) if err != nil { - api.portal.log.Println("ERROR: couldn't parse claims:", err) + api.portal.log.Error("couldn't parse claims", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -88,7 +89,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, // Verify the issuer. if issuer, ok := claims["iss"]; !ok || (issuer != "accounts.google.com" && issuer != "https://accounts.google.com") { - api.portal.log.Println("ERROR: invalid issuer") + api.portal.log.Error("invalid issuer") writeError(w, Error{ Code: httpErrorWrongCredentials, @@ -99,7 +100,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, // Verify the audience. if audience, ok := claims["aud"]; !ok || audience != googleClientID { - api.portal.log.Println("ERROR: invalid audience") + api.portal.log.Error("invalid audience") writeError(w, Error{ Code: httpErrorWrongCredentials, @@ -112,7 +113,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, var expires interface{} var ok bool if expires, ok = claims["exp"]; !ok { - api.portal.log.Println("ERROR: invalid expiration time") + api.portal.log.Error("invalid expiration time") writeError(w, Error{ Code: httpErrorWrongCredentials, @@ -121,7 +122,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, return } if expires.(float64) < float64(time.Now().Unix()) { - api.portal.log.Println("ERROR: token has expired") + api.portal.log.Error("token has expired") writeError(w, Error{ Code: httpErrorWrongCredentials, @@ -132,7 +133,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, // Check if the email is verified. 
if verified := claims["email_verified"]; verified != "true" && verified != true { - api.portal.log.Println("ERROR: email not verified") + api.portal.log.Error("email not verified") writeError(w, Error{ Code: httpErrorWrongCredentials, @@ -147,7 +148,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, // Create an account if it doesn't exist yet. exists, err := api.portal.userExists(email) if err != nil { - api.portal.log.Println("ERROR: couldn't verify user account:", err) + api.portal.log.Error("couldn't verify user account", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -178,7 +179,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, // Create a new account. if err := api.portal.updateAccount(email, "", true); err != nil { - api.portal.log.Printf("ERROR: error querying database: %v\n", err) + api.portal.log.Error("error querying database", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, @@ -201,7 +202,7 @@ func (api *portalAPI) authHandlerPOST(w http.ResponseWriter, req *http.Request, t := time.Now().Add(7 * 24 * time.Hour) token, err := api.portal.generateToken(cookiePrefix, email, t) if err != nil { - api.portal.log.Printf("ERROR: error generating token: %v\n", err) + api.portal.log.Error("error generating token", zap.Error(err)) writeError(w, Error{ Code: httpErrorInternal, diff --git a/modules/portal/network.go b/modules/portal/network.go index ec3f9e4..aaa6cbf 100644 --- a/modules/portal/network.go +++ b/modules/portal/network.go @@ -12,6 +12,7 @@ import ( "sync" "github.com/julienschmidt/httprouter" + "go.uber.org/zap" ) const ( @@ -187,7 +188,7 @@ func (p *Portal) initNetworking(address string) error { // Start the portal API server. srv := &http.Server{Handler: api} go srv.Serve(l) - p.log.Println("INFO: listening on", l.Addr()) + p.log.Info("listening on", zap.Stringer("addr", l.Addr())) // Spin up a goroutine to stop the server on shutdown. 
go func() { @@ -331,7 +332,7 @@ func (api *portalAPI) handleDecodeError(w http.ResponseWriter, err error) (Error // Otherwise send a 500 Internal Server Error response. default: - api.portal.log.Printf("ERROR: failed to decode JSON: %v\n", err) + api.portal.log.Error("failed to decode JSON", zap.Error(err)) return Error{ Code: httpErrorInternal, Message: "internal error", diff --git a/modules/portal/persist.go b/modules/portal/persist.go index 08963c0..8bfdc06 100644 --- a/modules/portal/persist.go +++ b/modules/portal/persist.go @@ -4,6 +4,7 @@ import ( "time" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" ) const ( @@ -51,7 +52,7 @@ func (p *Portal) threadedSaveLoop() { defer p.mu.Unlock() err = p.save() if err != nil { - p.log.Println("ERROR: unable to save portal persistence:", err) + p.log.Error("unable to save portal persistence", zap.Error(err)) } }() } diff --git a/modules/portal/portal.go b/modules/portal/portal.go index a738ee0..51476d9 100644 --- a/modules/portal/portal.go +++ b/modules/portal/portal.go @@ -2,8 +2,6 @@ package portal import ( "database/sql" - "errors" - "fmt" "net" "path/filepath" "sync" @@ -13,23 +11,15 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/persist" "go.sia.tech/core/types" -) - -var ( - // Nil dependency errors. - errNilDB = errors.New("portal cannot use a nil database") - errNilMail = errors.New("portal cannot use a nil mail client") - errNilCS = errors.New("portal cannot use a nil state") - errNilWallet = errors.New("portal cannot use a nil wallet") - errNilManager = errors.New("portal cannot use a nil manager") - errNilProvider = errors.New("portal cannot use a nil provider") + "go.sia.tech/coreutils/chain" + "go.uber.org/zap" ) // Portal contains the information related to the server. type Portal struct { // Dependencies. 
db *sql.DB - cs modules.ConsensusSet + cm *chain.Manager w modules.Wallet manager modules.Manager provider modules.Provider @@ -50,44 +40,23 @@ type Portal struct { name string // Utilities. - listener net.Listener - log *persist.Logger - mu sync.Mutex - tg siasync.ThreadGroup - staticAlerter *modules.GenericAlerter - closeChan chan int - ms mail.MailSender + listener net.Listener + log *zap.Logger + mu sync.Mutex + tg siasync.ThreadGroup + closeChan chan int + ms mail.MailSender } // New returns an initialized portal server. -func New(config *persist.SatdConfig, db *sql.DB, ms mail.MailSender, cs modules.ConsensusSet, w modules.Wallet, m modules.Manager, p modules.Provider, dir string) (*Portal, error) { +func New(config *persist.SatdConfig, db *sql.DB, ms mail.MailSender, cm *chain.Manager, w modules.Wallet, m modules.Manager, p modules.Provider, dir string) (*Portal, error) { var err error - // Check that all the dependencies were provided. - if db == nil { - return nil, errNilDB - } - if ms == nil { - return nil, errNilMail - } - if cs == nil { - return nil, errNilCS - } - if w == nil { - return nil, errNilWallet - } - if m == nil { - return nil, errNilManager - } - if p == nil { - return nil, errNilProvider - } - // Create the portal object. pt := &Portal{ db: db, ms: ms, - cs: cs, + cm: cm, w: w, manager: m, provider: p, @@ -100,8 +69,7 @@ func New(config *persist.SatdConfig, db *sql.DB, ms mail.MailSender, cs modules. transactions: make(map[types.TransactionID]types.Address), name: config.Name, - staticAlerter: modules.NewAlerter("portal"), - closeChan: make(chan int, 1), + closeChan: make(chan int, 1), } // Call stop in the event of a partial startup. @@ -113,20 +81,16 @@ func New(config *persist.SatdConfig, db *sql.DB, ms mail.MailSender, cs modules. }() // Create the logger. 
- pt.log, err = persist.NewFileLogger(filepath.Join(dir, "portal.log")) + logger, closeFn, err := persist.NewFileLogger(filepath.Join(dir, "portal.log")) if err != nil { return nil, err } // Establish the closing of the logger. pt.tg.AfterStop(func() { - err := pt.log.Close() - if err != nil { - // The logger may or may not be working here, so use a Println - // instead. - fmt.Println("ERROR: failed to close the portal logger:", err) - } + closeFn() }) - pt.log.Println("INFO: portal created, started logging") + pt.log = logger + pt.log.Info("portal created, started logging") // Load the portal persistence. if err = pt.load(); err != nil { @@ -156,29 +120,25 @@ func New(config *persist.SatdConfig, db *sql.DB, ms mail.MailSender, cs modules. pt.mu.Lock() defer pt.mu.Unlock() if err := pt.save(); err != nil { - pt.log.Println("ERROR: unable to save portal:", err) + pt.log.Error("unable to save portal", zap.Error(err)) } }) // Start listening to API requests. if err = pt.initNetworking("127.0.0.1" + pt.apiPort); err != nil { - pt.log.Println("ERROR: unable to start the portal server:", err) + pt.log.Error("unable to start the portal server", zap.Error(err)) return nil, err } // Subscribe to the consensus set using the most recent consensus change. go func() { - err := pt.cs.ConsensusSetSubscribe(pt, modules.ConsensusChangeRecent, pt.tg.StopChan()) - if modules.ContainsError(err, siasync.ErrStopped) { - return - } + err := pt.sync(pt.cm.Tip()) if err != nil { - pt.log.Critical(err) + pt.log.Error("couldn't subscribe to consensus updates", zap.Error(err)) return } }() pt.tg.OnStop(func() { - pt.cs.Unsubscribe(pt) // We don't want any recently made payments to go unnoticed. pt.managedCheckWallet() }) @@ -186,6 +146,28 @@ func New(config *persist.SatdConfig, db *sql.DB, ms mail.MailSender, cs modules. 
return pt, nil } +func (p *Portal) sync(index types.ChainIndex) error { + for index != p.cm.Tip() { + select { + case <-p.tg.StopChan(): + return nil + default: + } + crus, caus, err := p.cm.UpdatesSince(index, 1000) + if err != nil { + p.log.Error("failed to subscribe to chain manager", zap.Error(err)) + return err + } else if err := p.UpdateChainState(crus, caus); err != nil { + p.log.Error("failed to update chain state", zap.Error(err)) + return err + } + if len(caus) > 0 { + index = caus[len(caus)-1].State.Index + } + } + return nil +} + // Close shuts down the portal. func (p *Portal) Close() error { // Shut down the listener. diff --git a/modules/portal/protect.go b/modules/portal/protect.go index 1ce5f47..ff70658 100644 --- a/modules/portal/protect.go +++ b/modules/portal/protect.go @@ -34,17 +34,17 @@ const ( type ( // authAttempts keeps track of specific authentication activities. authAttempts struct { - LastAttempt int64 - Count int64 + LastAttempt int64 + Count int64 } // authenticationStats is the summary of authentication attempts // from a single IP address. authenticationStats struct { - RemoteHost string - FailedLogins authAttempts - Verifications authAttempts - PasswordResets authAttempts + RemoteHost string + FailedLogins authAttempts + Verifications authAttempts + PasswordResets authAttempts } ) @@ -114,11 +114,11 @@ func (p *Portal) checkAndUpdateVerifications(host string) error { // No such IP in the map yet. if !ok { p.authStats[host] = authenticationStats{ - RemoteHost: host, + RemoteHost: host, FailedLogins: authAttempts{}, Verifications: authAttempts{ LastAttempt: time.Now().Unix(), - Count: 1, + Count: 1, }, PasswordResets: authAttempts{}, } @@ -126,7 +126,7 @@ func (p *Portal) checkAndUpdateVerifications(host string) error { } // IP exists but no verification requests yet. 
- if (stats.Verifications.Count == 0) { + if stats.Verifications.Count == 0 { stats.Verifications.LastAttempt = time.Now().Unix() stats.Verifications.Count = 1 p.authStats[host] = stats @@ -134,15 +134,15 @@ func (p *Portal) checkAndUpdateVerifications(host string) error { } // Check for abuse. - stats.Verifications.LastAttempt = time.Now().Unix() - stats.Verifications.Count++ span := time.Now().Unix() - stats.Verifications.LastAttempt if span == 0 { span = 1 // To avoid division by zero. } + stats.Verifications.LastAttempt = time.Now().Unix() + stats.Verifications.Count++ p.authStats[host] = stats - if float64(stats.Verifications.Count) / float64(span) > maxVerifications { + if float64(stats.Verifications.Count)/float64(span) > maxVerifications { return errors.New("too many verification requests from " + host) } @@ -163,16 +163,16 @@ func (p *Portal) checkAndUpdateFailedLogins(host string) error { RemoteHost: host, FailedLogins: authAttempts{ LastAttempt: time.Now().Unix(), - Count: 1, + Count: 1, }, - Verifications: authAttempts{}, + Verifications: authAttempts{}, PasswordResets: authAttempts{}, } return nil } // IP exists but no failed logins yet. - if (stats.FailedLogins.Count == 0) { + if stats.FailedLogins.Count == 0 { stats.FailedLogins.LastAttempt = time.Now().Unix() stats.FailedLogins.Count = 1 p.authStats[host] = stats @@ -180,15 +180,15 @@ func (p *Portal) checkAndUpdateFailedLogins(host string) error { } // Check for abuse. - stats.FailedLogins.LastAttempt = time.Now().Unix() - stats.FailedLogins.Count++ span := time.Now().Unix() - stats.FailedLogins.LastAttempt if span == 0 { span = 1 // To avoid division by zero. 
} + stats.FailedLogins.LastAttempt = time.Now().Unix() + stats.FailedLogins.Count++ p.authStats[host] = stats - if float64(stats.FailedLogins.Count) / float64(span) > maxFailedLogins { + if float64(stats.FailedLogins.Count)/float64(span) > maxFailedLogins { return errors.New("too many failed logins from " + host) } @@ -206,19 +206,19 @@ func (p *Portal) checkAndUpdatePasswordResets(host string) error { // No such IP in the map yet. if !ok { p.authStats[host] = authenticationStats{ - RemoteHost: host, - FailedLogins: authAttempts{}, + RemoteHost: host, + FailedLogins: authAttempts{}, Verifications: authAttempts{}, PasswordResets: authAttempts{ LastAttempt: time.Now().Unix(), - Count: 1, + Count: 1, }, } return nil } // IP exists but no password resets yet. - if (stats.PasswordResets.Count == 0) { + if stats.PasswordResets.Count == 0 { stats.PasswordResets.LastAttempt = time.Now().Unix() stats.PasswordResets.Count = 1 p.authStats[host] = stats @@ -226,15 +226,15 @@ func (p *Portal) checkAndUpdatePasswordResets(host string) error { } // Check for abuse. - stats.PasswordResets.LastAttempt = time.Now().Unix() - stats.PasswordResets.Count++ span := time.Now().Unix() - stats.PasswordResets.LastAttempt if span == 0 { span = 1 // To avoid division by zero. 
} + stats.PasswordResets.LastAttempt = time.Now().Unix() + stats.PasswordResets.Count++ p.authStats[host] = stats - if float64(stats.PasswordResets.Count) / float64(span) > maxPasswordResets { + if float64(stats.PasswordResets.Count)/float64(span) > maxPasswordResets { return errors.New("too many password reset requests from " + host) } diff --git a/modules/portal/stripe.go b/modules/portal/stripe.go index a41bfc5..105c665 100644 --- a/modules/portal/stripe.go +++ b/modules/portal/stripe.go @@ -15,6 +15,7 @@ import ( "github.com/stripe/stripe-go/v75/invoice" "github.com/stripe/stripe-go/v75/paymentintent" "github.com/stripe/stripe-go/v75/webhook" + "go.uber.org/zap" ) // maxBodyBytes specifies the maximum body size for /webhook requests. @@ -121,7 +122,7 @@ func (api *portalAPI) paymentHandlerPOST(w http.ResponseWriter, req *http.Reques // Retrieve account balance. ub, cErr := api.portal.manager.GetBalance(email) if cErr != nil { - api.portal.log.Println("ERROR: Could not fetch account balance:", cErr) + api.portal.log.Error("could not fetch account balance", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -135,7 +136,7 @@ func (api *portalAPI) paymentHandlerPOST(w http.ResponseWriter, req *http.Reques if ub.IsUser && ub.StripeID != "" { cust, cErr = customer.Get(ub.StripeID, nil) if cErr != nil { - api.portal.log.Println("ERROR: Could not get customer:", cErr) + api.portal.log.Error("could not get customer", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -149,7 +150,7 @@ func (api *portalAPI) paymentHandlerPOST(w http.ResponseWriter, req *http.Reques } cust, cErr = customer.New(params) if cErr != nil { - api.portal.log.Println("ERROR: Could not create customer:", cErr) + api.portal.log.Error("could not create customer", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -163,7 +164,7 @@ func (api *portalAPI) paymentHandlerPOST(w http.ResponseWriter, req *http.Reques ub.StripeID = cust.ID cErr = 
api.portal.manager.UpdateBalance(email, ub) if cErr != nil { - api.portal.log.Println("ERROR: Could not update balance:", cErr) + api.portal.log.Error("could not update balance", zap.Error(cErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -200,7 +201,7 @@ func (api *portalAPI) paymentHandlerPOST(w http.ResponseWriter, req *http.Reques } amount, currency, pErr := calculateOrderAmount(id) if pErr != nil { - api.portal.log.Println("ERROR: couldn't read pending payment:", pErr) + api.portal.log.Error("couldn't read pending payment", zap.Error(pErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -221,7 +222,7 @@ func (api *portalAPI) paymentHandlerPOST(w http.ResponseWriter, req *http.Reques pi, pErr := paymentintent.New(params) if pErr != nil { - api.portal.log.Println("ERROR: pi.New:", pErr) + api.portal.log.Error("pi.New", zap.Error(pErr)) writeError(w, Error{ Code: httpErrorInternal, @@ -229,7 +230,7 @@ func (api *portalAPI) paymentHandlerPOST(w http.ResponseWriter, req *http.Reques }, http.StatusInternalServerError) return } - api.portal.log.Printf("pi.New: %v\n", pi.ClientSecret) + api.portal.log.Info("pi.New", zap.String("clientSecret", pi.ClientSecret)) writeJSON(w, struct { ClientSecret string `json:"clientSecret"` @@ -244,7 +245,7 @@ func (api *portalAPI) webhookHandlerPOST(w http.ResponseWriter, req *http.Reques req.Body = http.MaxBytesReader(w, req.Body, maxBodyBytes) payload, err := io.ReadAll(req.Body) if err != nil { - api.portal.log.Println("Error reading request body:", err) + api.portal.log.Error("error reading request body", zap.Error(err)) w.WriteHeader(http.StatusServiceUnavailable) return } @@ -253,7 +254,7 @@ func (api *portalAPI) webhookHandlerPOST(w http.ResponseWriter, req *http.Reques endpointSecret := os.Getenv("SATD_STRIPE_WEBHOOK_KEY") event, err := webhook.ConstructEvent(payload, req.Header.Get("Stripe-Signature"), endpointSecret) if err != nil { - api.portal.log.Println("Error verifying webhook signature:", err) + 
api.portal.log.Error("error verifying webhook signature", zap.Error(err)) w.WriteHeader(http.StatusBadRequest) return } @@ -265,7 +266,7 @@ func (api *portalAPI) webhookHandlerPOST(w http.ResponseWriter, req *http.Reques var paymentIntent stripe.PaymentIntent err := json.Unmarshal(event.Data.Raw, &paymentIntent) if err != nil { - api.portal.log.Println("Error parsing webhook JSON:", err) + api.portal.log.Error("error parsing webhook JSON", zap.Error(err)) w.WriteHeader(http.StatusBadRequest) return } @@ -277,7 +278,7 @@ func (api *portalAPI) webhookHandlerPOST(w http.ResponseWriter, req *http.Reques var paymentIntent stripe.PaymentIntent err := json.Unmarshal(event.Data.Raw, &paymentIntent) if err != nil { - api.portal.log.Println("Error parsing webhook JSON:", err) + api.portal.log.Error("error parsing webhook JSON", zap.Error(err)) w.WriteHeader(http.StatusBadRequest) return } @@ -286,7 +287,7 @@ func (api *portalAPI) webhookHandlerPOST(w http.ResponseWriter, req *http.Reques return default: - api.portal.log.Printf("Unhandled event type: %s\n", event.Type) + api.portal.log.Error("unhandled event type", zap.Any("event", event.Type)) } w.WriteHeader(http.StatusOK) @@ -303,7 +304,7 @@ func (p *Portal) handlePaymentIntentSucceeded(pi stripe.PaymentIntent) { } err := p.addPayment(cust.ID, amount, currency, def) if err != nil { - p.log.Println("ERROR: could not add payment:", err) + p.log.Error("could not add payment", zap.Error(err)) } // If a default payment method was specified, update the customer. 
@@ -315,7 +316,7 @@ func (p *Portal) handlePaymentIntentSucceeded(pi stripe.PaymentIntent) { } _, err = customer.Update(pi.Customer.ID, params) if err != nil { - p.log.Println("ERROR: couldn't update customer:", err) + p.log.Error("couldn't update customer", zap.Error(err)) } } } @@ -336,7 +337,7 @@ func (p *Portal) handlePaymentIntentFailed(pi stripe.PaymentIntent) { err := p.requestPayment(id, in.ID, amount, currency) if err != nil { - p.log.Println("ERROR: could not request payment:", err) + p.log.Error("could not request payment", zap.Error(err)) } } diff --git a/modules/portal/update.go b/modules/portal/update.go index 8925e8a..3269df4 100644 --- a/modules/portal/update.go +++ b/modules/portal/update.go @@ -3,8 +3,8 @@ package portal import ( "time" - "github.com/mike76-dev/sia-satellite/modules" - "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" + "go.uber.org/zap" ) // walletCheckInterval determines how often the wallet is checked @@ -42,67 +42,77 @@ func (p *Portal) threadedWatchForNewTxns() { func (p *Portal) managedCheckWallet() { addrs, err := p.getSiacoinAddresses() if err != nil { - p.log.Println("ERROR: couldn't get account addresses:", err) + p.log.Error("couldn't get account addresses", zap.Error(err)) return } p.mu.Lock() defer p.mu.Unlock() - for addr, email := range addrs { - pts, err := p.w.AddressUnconfirmedTransactions(addr) - if err != nil { - p.log.Println("ERROR: couldn't get unconfirmed transactions:", err) + txns := p.cm.PoolTransactions() + for _, txn := range txns { + txid := txn.ID() + if _, exists := p.transactions[txid]; exists { continue } - - for _, pt := range pts { - if _, exists := p.transactions[pt.TransactionID]; exists { - continue - } - for _, output := range pt.Outputs { - if output.FundType == types.SpecifierSiacoinOutput && output.WalletAddress { - err := p.addSiacoinPayment(email, output.Value, pt.TransactionID) - if err != nil { - p.log.Println("ERROR: couldn't add SC payment:", err) - continue - } - 
p.transactions[pt.TransactionID] = addr + for _, sco := range txn.SiacoinOutputs { + if email, exists := addrs[sco.Address]; exists { + if err := p.addSiacoinPayment(email, sco.Value, txid); err != nil { + p.log.Error("couldn't add SC payment", zap.Error(err)) + continue } + p.transactions[txid] = sco.Address } } } } -// ProcessConsensusChange gets called to inform Portal about the -// changes in the consensus set. -func (p *Portal) ProcessConsensusChange(cc modules.ConsensusChange) { - for _, block := range cc.RevertedBlocks { - for _, txn := range block.Transactions { - _, exists := p.transactions[txn.ID()] - if exists { - p.mu.Lock() +// UpdateChainState applies or reverts the updates from the ChainManager. +func (p *Portal) UpdateChainState(reverted []chain.RevertUpdate, applied []chain.ApplyUpdate) error { + addrs, err := p.getSiacoinAddresses() + if err != nil { + p.log.Error("couldn't get account addresses", zap.Error(err)) + return err + } + + p.mu.Lock() + defer p.mu.Unlock() + + for _, cru := range reverted { + for _, txn := range cru.Block.Transactions { + if _, exists := p.transactions[txn.ID()]; exists { delete(p.transactions, txn.ID()) - p.mu.Unlock() - err := p.revertSiacoinPayment(txn.ID()) - if err != nil { - p.log.Println("ERROR: couldn't revert SC payment:", err) + if err := p.revertSiacoinPayment(txn.ID()); err != nil { + p.log.Error("couldn't revert SC payment", zap.Error(err)) } } } } - for range cc.AppliedBlocks { - p.mu.Lock() - txns := p.transactions - p.mu.Unlock() - for txid := range txns { - err := p.confirmSiacoinPayment(txid) - if err != nil { - p.log.Println("ERROR: couldn't confirm SC payment:", err) + for _, cau := range applied { + for _, txn := range cau.Block.Transactions { + txid := txn.ID() + for _, sco := range txn.SiacoinOutputs { + if email, exists := addrs[sco.Address]; exists { + if _, exists := p.transactions[txid]; !exists { + if err := p.addSiacoinPayment(email, sco.Value, txid); err != nil { + p.log.Error("couldn't add 
SC payment", zap.Error(err)) + continue + } + p.transactions[txid] = sco.Address + } + } } } } + + for txid := range p.transactions { + if err := p.confirmSiacoinPayment(txid); err != nil { + p.log.Error("couldn't confirm SC payment", zap.Error(err)) + } + } + + return nil } // threadedCheckOnHoldAccounts performs the account checks with set diff --git a/modules/provider.go b/modules/provider.go index c0e6ee4..819ce3b 100644 --- a/modules/provider.go +++ b/modules/provider.go @@ -8,8 +8,6 @@ import ( // Provider implements the methods necessary to communicate with the // renters. type Provider interface { - Alerter - // Close safely shuts down the provider. Close() error @@ -38,11 +36,11 @@ func (ec ExtendedContract) EncodeTo(e *types.Encoder) { ec.Contract.Signatures[0].EncodeTo(e) ec.Contract.Signatures[1].EncodeTo(e) e.WriteUint64(ec.StartHeight) - ec.ContractPrice.EncodeTo(e) - ec.TotalCost.EncodeTo(e) - ec.UploadSpending.EncodeTo(e) - ec.DownloadSpending.EncodeTo(e) - ec.FundAccountSpending.EncodeTo(e) + types.V1Currency(ec.ContractPrice).EncodeTo(e) + types.V1Currency(ec.TotalCost).EncodeTo(e) + types.V1Currency(ec.UploadSpending).EncodeTo(e) + types.V1Currency(ec.DownloadSpending).EncodeTo(e) + types.V1Currency(ec.FundAccountSpending).EncodeTo(e) ec.RenewedFrom.EncodeTo(e) } @@ -93,11 +91,11 @@ func (cm *ContractMetadata) EncodeTo(e *types.Encoder) { e.Write(cm.HostKey[:]) e.WriteUint64(cm.StartHeight) e.Write(cm.RenewedFrom[:]) - cm.UploadSpending.EncodeTo(e) - cm.DownloadSpending.EncodeTo(e) - cm.FundAccountSpending.EncodeTo(e) - cm.ContractPrice.EncodeTo(e) - cm.TotalCost.EncodeTo(e) + types.V1Currency(cm.UploadSpending).EncodeTo(e) + types.V1Currency(cm.DownloadSpending).EncodeTo(e) + types.V1Currency(cm.FundAccountSpending).EncodeTo(e) + types.V1Currency(cm.ContractPrice).EncodeTo(e) + types.V1Currency(cm.TotalCost).EncodeTo(e) cm.Revision.EncodeTo(e) } @@ -107,10 +105,10 @@ func (cm *ContractMetadata) DecodeFrom(d *types.Decoder) { 
d.Read(cm.HostKey[:]) cm.StartHeight = d.ReadUint64() d.Read(cm.RenewedFrom[:]) - cm.UploadSpending.DecodeFrom(d) - cm.DownloadSpending.DecodeFrom(d) - cm.FundAccountSpending.DecodeFrom(d) - cm.ContractPrice.DecodeFrom(d) - cm.TotalCost.DecodeFrom(d) + (*types.V1Currency)(&cm.UploadSpending).DecodeFrom(d) + (*types.V1Currency)(&cm.DownloadSpending).DecodeFrom(d) + (*types.V1Currency)(&cm.FundAccountSpending).DecodeFrom(d) + (*types.V1Currency)(&cm.ContractPrice).DecodeFrom(d) + (*types.V1Currency)(&cm.TotalCost).DecodeFrom(d) cm.Revision.DecodeFrom(d) } diff --git a/modules/provider/alert.go b/modules/provider/alert.go deleted file mode 100644 index 1a11d07..0000000 --- a/modules/provider/alert.go +++ /dev/null @@ -1,8 +0,0 @@ -package provider - -import "github.com/mike76-dev/sia-satellite/modules" - -// Alerts implements the modules.Alerter interface for the provider. -func (p *Provider) Alerts() (crit, err, warn, info []modules.Alert) { - return p.staticAlerter.Alerts() -} diff --git a/modules/provider/encoding.go b/modules/provider/encoding.go index af3eec8..4d83c86 100644 --- a/modules/provider/encoding.go +++ b/modules/provider/encoding.go @@ -136,13 +136,13 @@ func (fr *formRequest) DecodeFrom(d *types.Decoder) { fr.Download = d.ReadUint64() fr.MinShards = d.ReadUint64() fr.TotalShards = d.ReadUint64() - fr.MaxRPCPrice.DecodeFrom(d) - fr.MaxContractPrice.DecodeFrom(d) - fr.MaxDownloadPrice.DecodeFrom(d) - fr.MaxUploadPrice.DecodeFrom(d) - fr.MaxStoragePrice.DecodeFrom(d) - fr.MaxSectorAccessPrice.DecodeFrom(d) - fr.MinMaxCollateral.DecodeFrom(d) + (*types.V1Currency)(&fr.MaxRPCPrice).DecodeFrom(d) + (*types.V1Currency)(&fr.MaxContractPrice).DecodeFrom(d) + (*types.V1Currency)(&fr.MaxDownloadPrice).DecodeFrom(d) + (*types.V1Currency)(&fr.MaxUploadPrice).DecodeFrom(d) + (*types.V1Currency)(&fr.MaxStoragePrice).DecodeFrom(d) + (*types.V1Currency)(&fr.MaxSectorAccessPrice).DecodeFrom(d) + (*types.V1Currency)(&fr.MinMaxCollateral).DecodeFrom(d) 
fr.BlockHeightLeeway = d.ReadUint64() fr.UploadPacking = d.ReadBool() fr.Signature.DecodeFrom(d) @@ -160,13 +160,13 @@ func (fr *formRequest) EncodeTo(e *types.Encoder) { e.WriteUint64(fr.Download) e.WriteUint64(fr.MinShards) e.WriteUint64(fr.TotalShards) - fr.MaxRPCPrice.EncodeTo(e) - fr.MaxContractPrice.EncodeTo(e) - fr.MaxDownloadPrice.EncodeTo(e) - fr.MaxUploadPrice.EncodeTo(e) - fr.MaxStoragePrice.EncodeTo(e) - fr.MaxSectorAccessPrice.EncodeTo(e) - fr.MinMaxCollateral.EncodeTo(e) + types.V1Currency(fr.MaxRPCPrice).EncodeTo(e) + types.V1Currency(fr.MaxContractPrice).EncodeTo(e) + types.V1Currency(fr.MaxDownloadPrice).EncodeTo(e) + types.V1Currency(fr.MaxUploadPrice).EncodeTo(e) + types.V1Currency(fr.MaxStoragePrice).EncodeTo(e) + types.V1Currency(fr.MaxSectorAccessPrice).EncodeTo(e) + types.V1Currency(fr.MinMaxCollateral).EncodeTo(e) e.WriteUint64(fr.BlockHeightLeeway) e.WriteBool(fr.UploadPacking) } @@ -217,13 +217,13 @@ func (rr *renewRequest) DecodeFrom(d *types.Decoder) { rr.Download = d.ReadUint64() rr.MinShards = d.ReadUint64() rr.TotalShards = d.ReadUint64() - rr.MaxRPCPrice.DecodeFrom(d) - rr.MaxContractPrice.DecodeFrom(d) - rr.MaxDownloadPrice.DecodeFrom(d) - rr.MaxUploadPrice.DecodeFrom(d) - rr.MaxStoragePrice.DecodeFrom(d) - rr.MaxSectorAccessPrice.DecodeFrom(d) - rr.MinMaxCollateral.DecodeFrom(d) + (*types.V1Currency)(&rr.MaxRPCPrice).DecodeFrom(d) + (*types.V1Currency)(&rr.MaxContractPrice).DecodeFrom(d) + (*types.V1Currency)(&rr.MaxDownloadPrice).DecodeFrom(d) + (*types.V1Currency)(&rr.MaxUploadPrice).DecodeFrom(d) + (*types.V1Currency)(&rr.MaxStoragePrice).DecodeFrom(d) + (*types.V1Currency)(&rr.MaxSectorAccessPrice).DecodeFrom(d) + (*types.V1Currency)(&rr.MinMaxCollateral).DecodeFrom(d) rr.BlockHeightLeeway = d.ReadUint64() rr.UploadPacking = d.ReadBool() rr.Signature.DecodeFrom(d) @@ -244,13 +244,13 @@ func (rr *renewRequest) EncodeTo(e *types.Encoder) { e.WriteUint64(rr.Download) e.WriteUint64(rr.MinShards) e.WriteUint64(rr.TotalShards) - 
rr.MaxRPCPrice.EncodeTo(e) - rr.MaxContractPrice.EncodeTo(e) - rr.MaxDownloadPrice.EncodeTo(e) - rr.MaxUploadPrice.EncodeTo(e) - rr.MaxStoragePrice.EncodeTo(e) - rr.MaxSectorAccessPrice.EncodeTo(e) - rr.MinMaxCollateral.EncodeTo(e) + types.V1Currency(rr.MaxRPCPrice).EncodeTo(e) + types.V1Currency(rr.MaxContractPrice).EncodeTo(e) + types.V1Currency(rr.MaxDownloadPrice).EncodeTo(e) + types.V1Currency(rr.MaxUploadPrice).EncodeTo(e) + types.V1Currency(rr.MaxStoragePrice).EncodeTo(e) + types.V1Currency(rr.MaxSectorAccessPrice).EncodeTo(e) + types.V1Currency(rr.MinMaxCollateral).EncodeTo(e) e.WriteUint64(rr.BlockHeightLeeway) e.WriteBool(rr.UploadPacking) } @@ -272,9 +272,9 @@ func (ur *updateRequest) DecodeFrom(d *types.Decoder) { ur.Contract.Revision.DecodeFrom(d) ur.Contract.Signatures[0].DecodeFrom(d) ur.Contract.Signatures[1].DecodeFrom(d) - ur.Uploads.DecodeFrom(d) - ur.Downloads.DecodeFrom(d) - ur.FundAccount.DecodeFrom(d) + (*types.V1Currency)(&ur.Uploads).DecodeFrom(d) + (*types.V1Currency)(&ur.Downloads).DecodeFrom(d) + (*types.V1Currency)(&ur.FundAccount).DecodeFrom(d) ur.Signature.DecodeFrom(d) } @@ -284,9 +284,9 @@ func (ur *updateRequest) EncodeTo(e *types.Encoder) { ur.Contract.Revision.EncodeTo(e) ur.Contract.Signatures[0].EncodeTo(e) ur.Contract.Signatures[1].EncodeTo(e) - ur.Uploads.EncodeTo(e) - ur.Downloads.EncodeTo(e) - ur.FundAccount.EncodeTo(e) + types.V1Currency(ur.Uploads).EncodeTo(e) + types.V1Currency(ur.Downloads).EncodeTo(e) + types.V1Currency(ur.FundAccount).EncodeTo(e) } // formContractRequest is used when forming a contract with a single @@ -475,13 +475,13 @@ func (usr *updateSettingsRequest) DecodeFrom(d *types.Decoder) { usr.Download = d.ReadUint64() usr.MinShards = d.ReadUint64() usr.TotalShards = d.ReadUint64() - usr.MaxRPCPrice.DecodeFrom(d) - usr.MaxContractPrice.DecodeFrom(d) - usr.MaxDownloadPrice.DecodeFrom(d) - usr.MaxUploadPrice.DecodeFrom(d) - usr.MaxStoragePrice.DecodeFrom(d) - usr.MaxSectorAccessPrice.DecodeFrom(d) - 
usr.MinMaxCollateral.DecodeFrom(d) + (*types.V1Currency)(&usr.MaxRPCPrice).DecodeFrom(d) + (*types.V1Currency)(&usr.MaxContractPrice).DecodeFrom(d) + (*types.V1Currency)(&usr.MaxDownloadPrice).DecodeFrom(d) + (*types.V1Currency)(&usr.MaxUploadPrice).DecodeFrom(d) + (*types.V1Currency)(&usr.MaxStoragePrice).DecodeFrom(d) + (*types.V1Currency)(&usr.MaxSectorAccessPrice).DecodeFrom(d) + (*types.V1Currency)(&usr.MinMaxCollateral).DecodeFrom(d) usr.BlockHeightLeeway = d.ReadUint64() usr.UploadPacking = d.ReadBool() } @@ -510,13 +510,13 @@ func (usr *updateSettingsRequest) EncodeTo(e *types.Encoder) { e.WriteUint64(usr.Download) e.WriteUint64(usr.MinShards) e.WriteUint64(usr.TotalShards) - usr.MaxRPCPrice.EncodeTo(e) - usr.MaxContractPrice.EncodeTo(e) - usr.MaxDownloadPrice.EncodeTo(e) - usr.MaxUploadPrice.EncodeTo(e) - usr.MaxStoragePrice.EncodeTo(e) - usr.MaxSectorAccessPrice.EncodeTo(e) - usr.MinMaxCollateral.EncodeTo(e) + types.V1Currency(usr.MaxRPCPrice).EncodeTo(e) + types.V1Currency(usr.MaxContractPrice).EncodeTo(e) + types.V1Currency(usr.MaxDownloadPrice).EncodeTo(e) + types.V1Currency(usr.MaxUploadPrice).EncodeTo(e) + types.V1Currency(usr.MaxStoragePrice).EncodeTo(e) + types.V1Currency(usr.MaxSectorAccessPrice).EncodeTo(e) + types.V1Currency(usr.MinMaxCollateral).EncodeTo(e) e.WriteUint64(usr.BlockHeightLeeway) e.WriteBool(usr.UploadPacking) } diff --git a/modules/provider/network.go b/modules/provider/network.go index e7a69a5..d5777e6 100644 --- a/modules/provider/network.go +++ b/modules/provider/network.go @@ -8,6 +8,7 @@ import ( "time" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/chacha20poly1305" @@ -19,59 +20,6 @@ import ( "lukechampine.com/frand" ) -// threadedUpdateHostname periodically runs 'managedLearnHostname', which -// checks if the Satellite's hostname has changed. 
-func (p *Provider) threadedUpdateHostname(closeChan chan struct{}) { - defer close(closeChan) - for { - p.managedLearnHostname() - // Wait 30 minutes to check again. We want the Satellite to be always - // accessible by the renters. - select { - case <-p.tg.StopChan(): - return - case <-time.After(time.Minute * 30): - continue - } - } -} - -// managedLearnHostname discovers the external IP of the Satellite. -func (p *Provider) managedLearnHostname() { - // Fetch the necessary variables. - p.mu.RLock() - satPort := p.port - satAutoAddress := p.autoAddress - p.mu.RUnlock() - - // Use the gateway to get the external ip. - hostname, err := p.g.DiscoverAddress(p.tg.StopChan()) - if err != nil { - p.log.Println("WARN: failed to discover external IP") - return - } - - autoAddress := modules.NetAddress(net.JoinHostPort(hostname.String(), satPort)) - if err := autoAddress.IsValid(); err != nil { - p.log.Printf("WARN: discovered hostname %q is invalid: %v", autoAddress, err) - return - } - if autoAddress == satAutoAddress { - // Nothing to do - the auto address has not changed. - return - } - - p.mu.Lock() - p.autoAddress = autoAddress - err = p.save() - p.mu.Unlock() - if err != nil { - p.log.Println("ERROR: couldn't save provider:", err) - } - - // TODO inform the renters that the Satellite address has changed. -} - // initNetworking performs actions like port forwarding, and gets the // Satellite established on the network. func (p *Provider) initNetworking(address, muxAddr string) (err error) { @@ -86,7 +34,7 @@ func (p *Provider) initNetworking(address, muxAddr string) (err error) { p.tg.OnStop(func() { err := p.listener.Close() if err != nil { - p.log.Println("WARN: closing the listener failed:", err) + p.log.Warn("closing the listener failed", zap.Error(err)) } // Wait until the threadedListener has returned to continue shutdown. 
@@ -100,35 +48,9 @@ func (p *Provider) initNetworking(address, muxAddr string) (err error) { } p.port = port - // Non-blocking, perform port forwarding and create the hostname discovery - // thread. - go func() { - // Add this function to the threadgroup, so that the logger will not - // disappear before port closing can be registered to the threadgroup - // OnStop functions. - err := p.tg.Add() - if err != nil { - // If this goroutine is not run before shutdown starts, this - // codeblock is reachable. - return - } - defer p.tg.Done() - - err = p.g.ForwardPort(port) - if err != nil { - p.log.Println(err) - } - - threadedUpdateHostnameClosedChan := make(chan struct{}) - go p.threadedUpdateHostname(threadedUpdateHostnameClosedChan) - p.tg.OnStop(func() { - <-threadedUpdateHostnameClosedChan - }) - }() - // Launch the listener. go p.threadedListen(threadedListenerClosedChan) - p.log.Println("INFO: listening on port", port) + p.log.Info("listening on port", zap.String("port", port)) // Create the mux and setup the close procedures. p.mux, err = net.Listen("tcp", muxAddr) @@ -145,7 +67,7 @@ func (p *Provider) initNetworking(address, muxAddr string) (err error) { p.tg.OnStop(func() { err := p.mux.Close() if err != nil { - p.log.Println("WARN: closing the mux failed:", err) + p.log.Warn("closing the mux failed", zap.Error(err)) } // Wait until the threadedListener has returned to continue shutdown. @@ -154,7 +76,7 @@ func (p *Provider) initNetworking(address, muxAddr string) (err error) { // Launch the mux. 
go p.threadedListenMux(threadedMuxClosedChan) - p.log.Println("INFO: mux listening on port", muxPort) + p.log.Info("mux listening on port", zap.String("port", muxPort)) return nil } @@ -193,7 +115,7 @@ func (p *Provider) threadedListenMux(closeChan chan struct{}) { conn, err := p.mux.Accept() if err != nil { if !errors.Is(err, net.ErrClosed) { - p.log.Println("WARN: falied to accept connection:", err) + p.log.Warn("falied to accept connection", zap.Error(err)) } return } @@ -204,7 +126,7 @@ func (p *Provider) threadedListenMux(closeChan chan struct{}) { // Upgrade the connection to RHP3. t, err := rhpv3.NewHostTransport(conn, p.secretKey) if err != nil { - p.log.Println("ERROR: falied to upgrade connection:", err) + p.log.Error("falied to upgrade connection", zap.Error(err)) return } defer t.Close() @@ -213,7 +135,7 @@ func (p *Provider) threadedListenMux(closeChan chan struct{}) { stream, err := t.AcceptStream() if err != nil { if !strings.Contains(err.Error(), "peer closed stream gracefully") && !strings.Contains(err.Error(), "peer closed underlying connection") { - p.log.Println("ERROR: falied to accept stream:", err) + p.log.Error("falied to accept stream", zap.Error(err)) } return } @@ -253,7 +175,7 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { // Skip if a satellite maintenance is running. if p.m.Maintenance() { - p.log.Println("INFO: closing inbound connection because satellite maintenance is running") + p.log.Info("closing inbound connection because satellite maintenance is running") return } @@ -261,7 +183,7 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { // this if desired. 
err = conn.SetDeadline(time.Now().Add(defaultConnectionDeadline)) if err != nil { - p.log.Println("WARN: could not set deadline on connection:", err) + p.log.Warn("could not set deadline on connection", zap.Error(err)) return } @@ -272,11 +194,11 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { var req loopKeyExchangeRequest req.DecodeFrom(d) if err = d.Err(); err != nil { - p.log.Println("ERROR: could not read handshake request:", err) + p.log.Error("could not read handshake request", zap.Error(err)) return } if req.Specifier != loopEnterSpecifier { - p.log.Println("ERROR: wrong handshake request specifier") + p.log.Error("wrong handshake request specifier") return } @@ -289,7 +211,7 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { } if !supportsChaCha { (&loopKeyExchangeResponse{Cipher: cipherNoOverlap}).EncodeTo(e) - p.log.Println("ERROR: no supported ciphers") + p.log.Error("no supported ciphers") return } @@ -309,14 +231,14 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { copy(resp.Signature[:], pubkeySig[:]) resp.EncodeTo(e) if err = e.Flush(); err != nil { - p.log.Println("ERROR: could not send handshake response:", err) + p.log.Error("could not send handshake response", zap.Error(err)) return } // Use cipherKey to initialize an AEAD cipher. 
aead, err := chacha20poly1305.New(cipherKey[:]) if err != nil { - p.log.Println("ERROR: could not create cipher:", err) + p.log.Error("could not create cipher", zap.Error(err)) return } @@ -332,7 +254,7 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { Challenge: s.Challenge, } if err := s.WriteMessage(&challengeReq); err != nil { - p.log.Println("ERROR: could not send challenge:", err) + p.log.Error("could not send challenge", zap.Error(err)) return } @@ -340,7 +262,7 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { var id types.Specifier err = s.ReadMessage(&id, modules.MinMessageSize) if err != nil { - p.log.Println("ERROR: could not read request specifier:", err) + p.log.Error("could not read request specifier", zap.Error(err)) return } @@ -416,10 +338,10 @@ func (p *Provider) threadedHandleConn(conn net.Conn) { err = modules.AddContext(err, "incoming RPCCompleteMultipart failed") } default: - p.log.Println("INFO: inbound connection from:", conn.RemoteAddr()) //TODO + p.log.Info("inbound connection from", zap.Stringer("host", conn.RemoteAddr())) //TODO } if err != nil { - p.log.Printf("ERROR: error with %v: %v\n", conn.RemoteAddr(), err) + p.log.Error("inbound connection failed", zap.Stringer("host", conn.RemoteAddr()), zap.Error(err)) } } @@ -445,7 +367,7 @@ func (p *Provider) threadedHandleStream(s *rhpv3.Stream, addr string) { // Skip if a satellite maintenance is running. if p.m.Maintenance() { - p.log.Println("INFO: closing inbound stream because satellite maintenance is running") + p.log.Info("closing inbound stream because satellite maintenance is running") return } @@ -453,13 +375,13 @@ func (p *Provider) threadedHandleStream(s *rhpv3.Stream, addr string) { // this if desired. 
err = s.SetDeadline(time.Now().Add(defaultStreamDeadline)) if err != nil { - p.log.Println("ERROR: could not set deadline on stream:", err) + p.log.Error("could not set deadline on stream", zap.Error(err)) return } id, err := s.ReadID() if err != nil { - p.log.Println("ERROR: failed to read RPC ID:", err) + p.log.Error("failed to read RPC ID", zap.Error(err)) return } @@ -485,10 +407,10 @@ func (p *Provider) threadedHandleStream(s *rhpv3.Stream, addr string) { err = modules.AddContext(err, "incoming RPCUploadPart failed") } default: - p.log.Println("INFO: unknown inbound stream from", addr) //TODO + p.log.Info("unknown inbound stream from", zap.String("host", addr)) //TODO } if err != nil { - p.log.Printf("ERROR: error with %v: %v\n", addr, err) + p.log.Error("inbound connection failed", zap.String("host", addr), zap.Error(err)) } } diff --git a/modules/provider/persist.go b/modules/provider/persist.go index 0d7d181..260683a 100644 --- a/modules/provider/persist.go +++ b/modules/provider/persist.go @@ -2,6 +2,7 @@ package provider import ( "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" @@ -22,7 +23,7 @@ func (p *Provider) establishDefaults() { // The generated keys are important, save them. err := p.save() if err != nil { - p.log.Println("ERROR: failed to save provider persistence:", err) + p.log.Error("failed to save provider persistence", zap.Error(err)) } } @@ -64,6 +65,6 @@ func (p *Provider) save() error { REPLACE INTO pr_info (id, public_key, secret_key, address) VALUES (1, ?, ?, ?) 
`, p.publicKey[:], p.secretKey, p.autoAddress) - + return err } diff --git a/modules/provider/provider.go b/modules/provider/provider.go index f5796a7..f4fb02f 100644 --- a/modules/provider/provider.go +++ b/modules/provider/provider.go @@ -2,8 +2,6 @@ package provider import ( "database/sql" - "errors" - "fmt" "net" "path/filepath" "sync" @@ -11,23 +9,17 @@ import ( siasync "github.com/mike76-dev/sia-satellite/internal/sync" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/persist" + "go.uber.org/zap" "go.sia.tech/core/types" ) -var ( - // Nil dependency errors. - errNilDB = errors.New("provider cannot use a nil database") - errNilGateway = errors.New("provider cannot use nil gateway") - errNilManager = errors.New("provider cannot use a nil manager") -) - // A Provider contains the information necessary to communicate with the // renters. type Provider struct { // Dependencies. db *sql.DB - g modules.Gateway + s modules.Syncer m modules.Manager autoAddress modules.NetAddress // Determined using automatic tooling in network.go @@ -35,26 +27,24 @@ type Provider struct { secretKey types.PrivateKey // Utilities. - listener net.Listener - mux net.Listener - log *persist.Logger - mu sync.RWMutex - port string - tg siasync.ThreadGroup - staticAlerter *modules.GenericAlerter + listener net.Listener + mux net.Listener + log *zap.Logger + mu sync.RWMutex + port string + tg siasync.ThreadGroup } // New returns an initialized Provider. -func New(db *sql.DB, g modules.Gateway, m modules.Manager, satelliteAddr string, muxAddr string, dir string) (*Provider, <-chan error) { +func New(db *sql.DB, s modules.Syncer, m modules.Manager, satelliteAddr string, muxAddr string, dir string) (*Provider, <-chan error) { errChan := make(chan error, 1) var err error // Create the Provider object. 
p := &Provider{ - db: db, - g: g, - m: m, - staticAlerter: modules.NewAlerter("provider"), + db: db, + s: s, + m: m, } // Call stop in the event of a partial startup. @@ -65,21 +55,17 @@ func New(db *sql.DB, g modules.Gateway, m modules.Manager, satelliteAddr string, }() // Create the logger. - p.log, err = persist.NewFileLogger(filepath.Join(dir, "provider.log")) + logger, closeFn, err := persist.NewFileLogger(filepath.Join(dir, "provider.log")) if err != nil { errChan <- err return nil, errChan } // Establish the closing of the logger. p.tg.AfterStop(func() { - err := p.log.Close() - if err != nil { - // The logger may or may not be working here, so use a Println - // instead. - fmt.Println("Failed to close the provider logger:", err) - } + closeFn() }) - p.log.Println("INFO: provider created, started logging") + p.log = logger + p.log.Info("provider created, started logging") // Load the provider persistence. if loadErr := p.load(); loadErr != nil { @@ -90,7 +76,7 @@ func New(db *sql.DB, g modules.Gateway, m modules.Manager, satelliteAddr string, // Initialize the networking. 
err = p.initNetworking(satelliteAddr, muxAddr) if err != nil { - p.log.Println("ERROR: could not initialize provider networking:", err) + p.log.Error("could not initialize provider networking", zap.Error(err)) errChan <- err return nil, errChan } diff --git a/modules/provider/rpc.go b/modules/provider/rpc.go index 1721d4e..b9fb978 100644 --- a/modules/provider/rpc.go +++ b/modules/provider/rpc.go @@ -9,6 +9,7 @@ import ( "github.com/mike76-dev/sia-satellite/modules" "github.com/rs/xid" + "go.uber.org/zap" rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" @@ -1076,11 +1077,11 @@ func (p *Provider) managedReceiveFile(s *rhpv3.Stream) error { } defer func() { if err := file.Sync(); err != nil { - p.log.Println("ERROR: couldn't sync file:", err) + p.log.Error("couldn't sync file", zap.Error(err)) } else if err := file.Close(); err != nil { - p.log.Println("ERROR: couldn't close file:", err) + p.log.Error("couldn't close file", zap.Error(err)) } else if err := p.m.RegisterUpload(ur.PubKey, ur.Bucket, ur.Path, ur.MimeType, ur.Enctypted, path, !ud.More); err != nil { - p.log.Println("ERROR: couldn't register file:", err) + p.log.Error("couldn't register file", zap.Error(err)) } }() @@ -1324,11 +1325,11 @@ func (p *Provider) managedReceivePart(s *rhpv3.Stream) error { // Save the file and register the part. 
if err := file.Sync(); err != nil { - p.log.Println("ERROR: couldn't sync file:", err) + p.log.Error("couldn't sync file", zap.Error(err)) } else if err := file.Close(); err != nil { - p.log.Println("ERROR: couldn't close file:", err) + p.log.Error("couldn't close file", zap.Error(err)) } else if err := p.m.PutMultipartPart(upr.PubKey, upr.UploadID, upr.PartNo, name); err != nil { - p.log.Println("ERROR: couldn't register part:", err) + p.log.Error("couldn't register part", zap.Error(err)) } return nil diff --git a/modules/seed.go b/modules/seed.go index 8dbaa0e..b48cb43 100644 --- a/modules/seed.go +++ b/modules/seed.go @@ -1,7 +1,6 @@ package modules import ( - "crypto/rand" "crypto/sha256" "encoding/binary" "errors" @@ -9,8 +8,8 @@ import ( "strings" "go.sia.tech/core/types" - "golang.org/x/crypto/blake2b" + "lukechampine.com/frand" ) // NOTE: This is not a full implementation of BIP39; only 12-word phrases (128 @@ -22,46 +21,54 @@ func memclr(p []byte) { } } +// Seed represents a 16-byte wallet seed. +type Seed [16]byte + // NewSeedPhrase returns a random seed phrase. func NewSeedPhrase() string { - var entropy Seed - if _, err := rand.Read(entropy[:]); err != nil { - panic("insufficient system entropy") - } - return EncodeBIP39Phrase(entropy) + var entropy [16]byte + frand.Read(entropy[:]) + p := encodeBIP39Phrase(&entropy) + memclr(entropy[:]) + return p } -// KeyFromPhrase returns the Ed25519 key derived from the supplied seed phrase. -func KeyFromPhrase(phrase string) (types.PrivateKey, error) { - entropy, err := DecodeBIP39Phrase(phrase) +// SeedFromPhrase converts the supplied phrase into a 16-byte seed. +func SeedFromPhrase(seed *Seed, phrase string) error { + entropy, err := decodeBIP39Phrase(phrase) if err != nil { - return nil, err + return err } - h := blake2b.Sum256(entropy[:]) + copy(seed[:], entropy[:]) memclr(entropy[:]) + return nil +} + +// KeyFromSeed returns the Ed25519 key derived from the supplied seed +// at the specified index. 
+func KeyFromSeed(seed *Seed, index uint64) types.PrivateKey { + h := blake2b.Sum256(seed[:]) buf := make([]byte, 32+8) copy(buf[:32], h[:]) + binary.LittleEndian.PutUint64(buf[32:], index) + h = blake2b.Sum256(buf) + key := types.NewPrivateKeyFromSeed(h[:]) memclr(h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - seed := blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(seed[:]) - memclr(seed[:]) - return key, nil + return key } -// bip39checksum is a helper function that generates a checksum of the entropy. -func bip39checksum(entropy Seed) uint64 { +func bip39checksum(entropy *[16]byte) uint64 { hash := sha256.Sum256(entropy[:]) return uint64((hash[0] & 0xF0) >> 4) } -// EncodeBIP39Phrase derives a seed phrase from the entropy. -func EncodeBIP39Phrase(entropy Seed) string { - // Convert entropy to a 128-bit integer. +// encodeBIP39Phrase converts 16-byte entropy to a seed phrase. +func encodeBIP39Phrase(entropy *[16]byte) string { + // convert entropy to a 128-bit integer hi := binary.BigEndian.Uint64(entropy[:8]) lo := binary.BigEndian.Uint64(entropy[8:]) - // Convert each group of 11 bits into a word. + // convert each group of 11 bits into a word words := make([]string, 12) // last word is special: 4 bits are checksum w := ((lo & 0x7F) << 4) | bip39checksum(entropy) @@ -77,43 +84,43 @@ func EncodeBIP39Phrase(entropy Seed) string { return strings.Join(words, " ") } -// DecodeBIP39Phrase converts a seed phrase into a byte slice. -func DecodeBIP39Phrase(phrase string) (Seed, error) { - // Validate that the phrase is well formed and only contains words that - // are present in the word list. +// decodeBIP39Phrase converts the seed phrase to 16-byte entropy. 
+func decodeBIP39Phrase(phrase string) (*[16]byte, error) { + // validate that the phrase is well formed and only contains words that + // are present in the word list words := strings.Fields(phrase) if n := len(words); n != 12 { - return Seed{}, errors.New("wrong number of words in seed phrase") + return nil, errors.New("wrong number of words in seed phrase") } for _, word := range words { if _, ok := wordMap[word]; !ok { - return Seed{}, fmt.Errorf("unrecognized word %q in seed phrase", word) + return nil, fmt.Errorf("unrecognized word %q in seed phrase", word) } } - // Convert words to 128 bits, 11 bits at a time. + // convert words to 128 bits, 11 bits at a time var lo, hi uint64 for _, v := range words[:len(words)-1] { hi = hi<<11 | lo>>(64-11) lo = lo<<11 | wordMap[v] } - // Last word is special: least-significant 4 bits are checksum, so shift - // them off and only add the remaining 7 bits. + // last word is special: least-significant 4 bits are checksum, so shift + // them off and only add the remaining 7 bits w := wordMap[words[len(words)-1]] checksum := w & 0xF hi = hi<<7 | lo>>(64-7) lo = lo<<7 | w>>4 - // Convert to big-endian byte slice. - var entropy Seed + // convert to big-endian byte slice + var entropy [16]byte binary.BigEndian.PutUint64(entropy[:8], hi) binary.BigEndian.PutUint64(entropy[8:], lo) - // Validate checksum. - if bip39checksum(entropy) != checksum { - return Seed{}, errors.New("invalid checksum") + // validate checksum + if bip39checksum(&entropy) != checksum { + return nil, errors.New("invalid checksum") } - return entropy, nil + return &entropy, nil } var wordMap = func() map[string]uint64 { diff --git a/modules/syncer.go b/modules/syncer.go new file mode 100644 index 0000000..129cac8 --- /dev/null +++ b/modules/syncer.go @@ -0,0 +1,42 @@ +package modules + +import ( + "context" + + "go.sia.tech/core/gateway" + "go.sia.tech/core/types" + "go.sia.tech/coreutils/syncer" +) + +// A Syncer synchronizes blockchain data with peers. 
+type Syncer interface { + // Addr returns the address of the Syncer. + Addr() string + + // BroadcastHeader broadcasts a header to all peers. + BroadcastHeader(h gateway.BlockHeader) + + // BroadcastTransactionSet broadcasts a transaction set to all peers. + BroadcastTransactionSet(txns []types.Transaction) + + // BroadcastV2BlockOutline broadcasts a v2 block outline to all peers. + BroadcastV2BlockOutline(b gateway.V2BlockOutline) + + // BroadcastV2TransactionSet broadcasts a v2 transaction set to all peers. + BroadcastV2TransactionSet(index types.ChainIndex, txns []types.V2Transaction) + + // Close shuts down the Syncer. + Close() error + + // Connect forms an outbound connection to a peer. + Connect(ctx context.Context, addr string) (*syncer.Peer, error) + + // PeerInfo returns the information about the current peers. + PeerInfo() []syncer.PeerInfo + + // Peers returns the set of currently-connected peers. + Peers() []*syncer.Peer + + // Synced returns if the syncer is synced to the blockchain. + Synced() bool +} diff --git a/modules/syncer/store.go b/modules/syncer/store.go new file mode 100644 index 0000000..33c1a8f --- /dev/null +++ b/modules/syncer/store.go @@ -0,0 +1,224 @@ +package syncer + +import ( + "encoding/json" + "net" + "os" + "sync" + "time" + + core "go.sia.tech/coreutils/syncer" +) + +type peerBan struct { + Expiry time.Time `json:"expiry"` + Reason string `json:"reason"` +} + +// EphemeralPeerStore implements PeerStore with an in-memory map. 
+type EphemeralPeerStore struct { + peers map[string]core.PeerInfo + bans map[string]peerBan + mu sync.Mutex +} + +func (eps *EphemeralPeerStore) banned(addr string) (bool, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return false, err // shouldn't happen + } + for _, s := range []string{ + addr, // 1.2.3.4:5678 + core.Subnet(host, "/32"), // 1.2.3.4:* + core.Subnet(host, "/24"), // 1.2.3.* + core.Subnet(host, "/16"), // 1.2.* + core.Subnet(host, "/8"), // 1.* + } { + if b, ok := eps.bans[s]; ok { + if time.Until(b.Expiry) <= 0 { + delete(eps.bans, s) + } else { + return true, nil + } + } + } + return false, nil +} + +// AddPeer implements PeerStore. +func (eps *EphemeralPeerStore) AddPeer(addr string) error { + eps.mu.Lock() + defer eps.mu.Unlock() + if _, ok := eps.peers[addr]; !ok { + eps.peers[addr] = core.PeerInfo{Address: addr, FirstSeen: time.Now()} + } + return nil +} + +// Peers implements PeerStore. +func (eps *EphemeralPeerStore) Peers() ([]core.PeerInfo, error) { + eps.mu.Lock() + defer eps.mu.Unlock() + var peers []core.PeerInfo + for addr, p := range eps.peers { + banned, err := eps.banned(addr) + if err != nil { + return nil, err + } + if !banned { + peers = append(peers, p) + } + } + return peers, nil +} + +// PeerInfo implements PeerStore. +func (eps *EphemeralPeerStore) PeerInfo(addr string) (core.PeerInfo, error) { + eps.mu.Lock() + defer eps.mu.Unlock() + info, ok := eps.peers[addr] + if !ok { + return core.PeerInfo{}, core.ErrPeerNotFound + } + return info, nil +} + +// UpdatePeerInfo implements PeerStore. +func (eps *EphemeralPeerStore) UpdatePeerInfo(addr string, fn func(*core.PeerInfo)) error { + eps.mu.Lock() + defer eps.mu.Unlock() + info, ok := eps.peers[addr] + if !ok { + return core.ErrPeerNotFound + } + fn(&info) + eps.peers[addr] = info + return nil +} + +// Ban implements PeerStore. 
+func (eps *EphemeralPeerStore) Ban(addr string, duration time.Duration, reason string) error { + eps.mu.Lock() + defer eps.mu.Unlock() + // Canonicalize. + if _, ipnet, err := net.ParseCIDR(addr); err == nil { + addr = ipnet.String() + } + eps.bans[addr] = peerBan{Expiry: time.Now().Add(duration), Reason: reason} + return nil +} + +// Banned implements PeerStore. +func (eps *EphemeralPeerStore) Banned(addr string) (bool, error) { + eps.mu.Lock() + defer eps.mu.Unlock() + return eps.banned(addr) +} + +// NewEphemeralPeerStore initializes an EphemeralPeerStore. +func NewEphemeralPeerStore() *EphemeralPeerStore { + return &EphemeralPeerStore{ + peers: make(map[string]core.PeerInfo), + bans: make(map[string]peerBan), + } +} + +type jsonPersist struct { + Peers map[string]core.PeerInfo `json:"peers"` + Bans map[string]peerBan `json:"bans"` +} + +// JSONPeerStore implements PeerStore with a JSON file on disk. +type JSONPeerStore struct { + *EphemeralPeerStore + path string + lastSave time.Time +} + +func (jps *JSONPeerStore) load() error { + f, err := os.Open(jps.path) + if os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + defer f.Close() + var p jsonPersist + if err := json.NewDecoder(f).Decode(&p); err != nil { + return err + } + jps.EphemeralPeerStore.peers = p.Peers + jps.EphemeralPeerStore.bans = p.Bans + return nil +} + +func (jps *JSONPeerStore) save() error { + jps.EphemeralPeerStore.mu.Lock() + defer jps.EphemeralPeerStore.mu.Unlock() + if time.Since(jps.lastSave) < 5*time.Second { + return nil + } + defer func() { jps.lastSave = time.Now() }() + // Clear out expired bans. 
+ for peer, b := range jps.EphemeralPeerStore.bans { + if time.Until(b.Expiry) <= 0 { + delete(jps.EphemeralPeerStore.bans, peer) + } + } + p := jsonPersist{ + Peers: jps.EphemeralPeerStore.peers, + Bans: jps.EphemeralPeerStore.bans, + } + js, err := json.MarshalIndent(p, "", " ") + if err != nil { + return err + } + f, err := os.OpenFile(jps.path+"_tmp", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0660) + if err != nil { + return err + } + defer f.Close() + if _, err = f.Write(js); err != nil { + return err + } else if f.Sync(); err != nil { + return err + } else if f.Close(); err != nil { + return err + } else if err := os.Rename(jps.path+"_tmp", jps.path); err != nil { + return err + } + return nil +} + +// AddPeer implements PeerStore. +func (jps *JSONPeerStore) AddPeer(addr string) error { + if err := jps.EphemeralPeerStore.AddPeer(addr); err != nil { + return err + } + return jps.save() +} + +// UpdatePeerInfo implements PeerStore. +func (jps *JSONPeerStore) UpdatePeerInfo(addr string, fn func(*core.PeerInfo)) error { + if err := jps.EphemeralPeerStore.UpdatePeerInfo(addr, fn); err != nil { + return err + } + return jps.save() +} + +// Ban implements PeerStore. +func (jps *JSONPeerStore) Ban(addr string, duration time.Duration, reason string) error { + if err := jps.EphemeralPeerStore.Ban(addr, duration, reason); err != nil { + return err + } + return jps.save() +} + +// NewJSONPeerStore returns a JSONPeerStore backed by the specified file. 
+func NewJSONPeerStore(path string) (*JSONPeerStore, error) { + jps := &JSONPeerStore{ + EphemeralPeerStore: NewEphemeralPeerStore(), + path: path, + } + return jps, jps.load() +} diff --git a/modules/syncer/syncer.go b/modules/syncer/syncer.go new file mode 100644 index 0000000..ea0c2bf --- /dev/null +++ b/modules/syncer/syncer.go @@ -0,0 +1,174 @@ +package syncer + +import ( + "context" + "net" + "path/filepath" + + "github.com/mike76-dev/sia-satellite/modules" + "github.com/mike76-dev/sia-satellite/persist" + "go.sia.tech/core/gateway" + "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" + "go.sia.tech/coreutils/syncer" + "go.uber.org/zap" +) + +// Network bootstrap. +var ( + bootstrapPeers = []string{ + "108.227.62.195:9981", + "139.162.81.190:9991", + "144.217.7.188:9981", + "147.182.196.252:9981", + "15.235.85.30:9981", + "167.235.234.84:9981", + "173.235.144.230:9981", + "198.98.53.144:7791", + "199.27.255.169:9981", + "2.136.192.200:9981", + "213.159.50.43:9981", + "24.253.116.61:9981", + "46.249.226.103:9981", + "5.165.236.113:9981", + "5.252.226.131:9981", + "54.38.120.222:9981", + "62.210.136.25:9981", + "63.135.62.123:9981", + "65.21.93.245:9981", + "75.165.149.114:9981", + "77.51.200.125:9981", + "81.6.58.121:9981", + "83.194.193.156:9981", + "84.39.246.63:9981", + "87.99.166.34:9981", + "91.214.242.11:9981", + "93.105.88.181:9981", + "93.180.191.86:9981", + "94.130.220.162:9981", + } +) + +// We consider ourselves synced if minSyncedPeers say that we are. +const minSyncedPeers = 5 + +// A Syncer synchronizes blockchain data with peers. +type Syncer struct { + s *syncer.Syncer + ps syncer.PeerStore + l net.Listener + log *zap.Logger + closeFn func() +} + +// Synced returns if the syncer is synced to the blockchain. 
+func (s *Syncer) Synced() bool { + var count int + for _, peer := range s.Peers() { + if peer.Synced() { + count++ + } + } + + return count >= minSyncedPeers +} + +// Run spawns goroutines for accepting inbound connections, forming outbound +// connections, and syncing the blockchain from active peers. It blocks until an +// error occurs, upon which all connections are closed and goroutines are +// terminated. +func (s *Syncer) Run() error { + return s.s.Run() +} + +// Connect forms an outbound connection to a peer. +func (s *Syncer) Connect(ctx context.Context, addr string) (*syncer.Peer, error) { + return s.s.Connect(ctx, addr) +} + +// BroadcastHeader broadcasts a header to all peers. +func (s *Syncer) BroadcastHeader(h gateway.BlockHeader) { s.s.BroadcastHeader(h) } + +// BroadcastV2Header broadcasts a v2 header to all peers. +func (s *Syncer) BroadcastV2Header(h gateway.V2BlockHeader) { s.s.BroadcastV2Header(h) } + +// BroadcastV2BlockOutline broadcasts a v2 block outline to all peers. +func (s *Syncer) BroadcastV2BlockOutline(b gateway.V2BlockOutline) { s.s.BroadcastV2BlockOutline(b) } + +// BroadcastTransactionSet broadcasts a transaction set to all peers. +func (s *Syncer) BroadcastTransactionSet(txns []types.Transaction) { s.s.BroadcastTransactionSet(txns) } + +// BroadcastV2TransactionSet broadcasts a v2 transaction set to all peers. +func (s *Syncer) BroadcastV2TransactionSet(index types.ChainIndex, txns []types.V2Transaction) { + s.s.BroadcastV2TransactionSet(index, txns) +} + +// Peers returns the set of currently-connected peers. +func (s *Syncer) Peers() []*syncer.Peer { + return s.s.Peers() +} + +// PeerInfo returns the information about the current peers. +func (s *Syncer) PeerInfo() []syncer.PeerInfo { + info, _ := s.ps.Peers() + return info +} + +// Addr returns the address of the Syncer. +func (s *Syncer) Addr() string { + return s.s.Addr() +} + +// Close shuts down the Syncer. 
+func (s *Syncer) Close() error { + err := s.l.Close() + if err != nil { + s.log.Sugar().Error("unable to close listener", err) + } + s.closeFn() + return err +} + +// New returns a new Syncer. +func New(cm *chain.Manager, addr, dir string) (*Syncer, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, modules.AddContext(err, "unable to start listener") + } + + syncerAddr := l.Addr().String() + host, port, _ := net.SplitHostPort(syncerAddr) + if ip := net.ParseIP(host); ip == nil || ip.IsUnspecified() { + syncerAddr = net.JoinHostPort("127.0.0.1", port) + } + + ps, err := NewJSONPeerStore(filepath.Join(dir, "peers.json")) + if err != nil { + return nil, modules.AddContext(err, "unable to create store") + } + for _, peer := range bootstrapPeers { + ps.AddPeer(peer) + } + + _, genesisBlock := chain.Mainnet() + header := gateway.Header{ + GenesisID: genesisBlock.ID(), + UniqueID: gateway.GenerateUniqueID(), + NetAddress: syncerAddr, + } + + logger, closeFn, err := persist.NewFileLogger(filepath.Join(dir, "syncer.log")) + if err != nil { + return nil, modules.AddContext(err, "unable to create logger") + } + + s := syncer.New(l, cm, ps, header, syncer.WithLogger(logger)) + + return &Syncer{ + s: s, + ps: ps, + l: l, + log: logger, + closeFn: closeFn, + }, nil +} diff --git a/modules/transactionpool.go b/modules/transactionpool.go deleted file mode 100644 index abb02b1..0000000 --- a/modules/transactionpool.go +++ /dev/null @@ -1,188 +0,0 @@ -package modules - -import ( - "errors" - "strings" - - "go.sia.tech/core/types" -) - -const ( - // TransactionSetSizeLimit defines the largest set of dependent unconfirmed - // transactions that will be accepted by the transaction pool. - TransactionSetSizeLimit = 250e3 - - // TransactionSizeLimit defines the size of the largest transaction that - // will be accepted by the transaction pool according to the IsStandard - // rules. 
- TransactionSizeLimit = 32e3 - - // consensusConflictPrefix is the prefix of every ConsensusConflict. - consensusConflictPrefix = "consensus conflict: " -) - -var ( - // ErrDuplicateTransactionSet is the error that gets returned if a - // duplicate transaction set is given to the transaction pool. - ErrDuplicateTransactionSet = errors.New("transaction set contains only duplicate transactions") - - // ErrInvalidArbPrefix is the error that gets returned if a transaction is - // submitted to the transaction pool which contains a prefix that is not - // recognized. This helps prevent miners on old versions from mining - // potentially illegal transactions in the event of a soft-fork. - ErrInvalidArbPrefix = errors.New("transaction contains non-standard arbitrary data") - - // ErrLargeTransaction is the error that gets returned if a transaction - // provided to the transaction pool is larger than what is allowed by the - // IsStandard rules. - ErrLargeTransaction = errors.New("transaction is too large for this transaction pool") - - // ErrLargeTransactionSet is the error that gets returned if a transaction - // set given to the transaction pool is larger than the limit placed by the - // IsStandard rules of the transaction pool. - ErrLargeTransactionSet = errors.New("transaction set is too large for this transaction pool") - - // PrefixNonSia defines the prefix that should be appended to any - // transactions that use the arbitrary data for reasons outside of the - // standard Sia protocol. This will prevent these transactions from being - // rejected by the IsStandard set of rules, but also means that the data - // will never be used within the formal Sia protocol. - PrefixNonSia = types.NewSpecifier("NonSia") - - // PrefixHostAnnouncement is used to indicate that a transaction's - // Arbitrary Data field contains a host announcement. The encoded - // announcement will follow this prefix. 
- PrefixHostAnnouncement = types.NewSpecifier("HostAnnouncement") - - // PrefixFileContractIdentifier is used to indicate that a transaction's - // Arbitrary Data field contains a file contract identifier. The identifier - // and its signature will follow this prefix. - PrefixFileContractIdentifier = types.NewSpecifier("FCIdentifier") -) - -type ( - // ConsensusConflict implements the error interface, and indicates that a - // transaction was rejected due to being incompatible with the current - // consensus set, meaning either a double spend or a consensus rule violation - - // it is unlikely that the transaction will ever be valid. - ConsensusConflict string - - // TransactionSetID is a type-safe wrapper for a types.Hash256 that - // represents the ID of an entire transaction set. - TransactionSetID types.Hash256 - - // A TransactionPoolDiff indicates the adding or removal of a transaction set to - // the transaction pool. The transactions in the pool are not persisted, so at - // startup modules should assume an empty transaction pool. - TransactionPoolDiff struct { - AppliedTransactions []*UnconfirmedTransactionSet - RevertedTransactions []TransactionSetID - } - - // UnconfirmedTransactionSet defines a new unconfirmed transaction that has - // been added to the transaction pool. ID is the ID of the set, IDs contains - // an ID for each transaction, eliminating the need to recompute it (because - // that's an expensive operation). - UnconfirmedTransactionSet struct { - Change *ConsensusChange - ID TransactionSetID - - IDs []types.TransactionID - Sizes []uint64 - Transactions []types.Transaction - } -) - -type ( - // A TransactionPoolSubscriber receives updates about the confirmed and - // unconfirmed set from the transaction pool. Generally, there is no need to - // subscribe to both the consensus set and the transaction pool. 
- TransactionPoolSubscriber interface { - // ReceiveTransactionPoolUpdate notifies subscribers of a change to the - // consensus set and/or unconfirmed set, and includes the consensus change - // that would result if all of the transactions made it into a block. - ReceiveUpdatedUnconfirmedTransactions(*TransactionPoolDiff) - } - - // A TransactionPool manages unconfirmed transactions. - TransactionPool interface { - Alerter - - // AcceptTransactionSet accepts a set of potentially interdependent - // transactions. - AcceptTransactionSet([]types.Transaction) error - - // Broadcast broadcasts a transaction set to all of the transaction pool's - // peers. - Broadcast(ts []types.Transaction) - - // Close is necessary for clean shutdown (e.g. during testing). - Close() error - - // FeeEstimation returns an estimation for how high the transaction fee - // needs to be per byte. The minimum recommended targets getting accepted - // in ~3 blocks, and the maximum recommended targets getting accepted - // immediately. Taking the average has a moderate chance of being accepted - // within one block. The minimum has a strong chance of getting accepted - // within 10 blocks. - FeeEstimation() (minimumRecommended, maximumRecommended types.Currency) - - // Transaction returns the transaction and unconfirmed parents - // corresponding to the provided transaction id. - Transaction(id types.TransactionID) (txn types.Transaction, unconfirmedParents []types.Transaction, exists bool) - - // Transactions returns the transactions of the transaction pool. - Transactions() []types.Transaction - - // TransactionConfirmed returns true if the transaction has been seen on the - // blockchain. Note, however, that the block containing the transaction may - // later be invalidated by a reorg. - TransactionConfirmed(id types.TransactionID) (bool, error) - - // TransactionList returns a list of all transactions in the transaction - // pool. 
The transactions are provided in an order that can acceptably be - // put into a block. - TransactionList() []types.Transaction - - // TransactionPoolSubscribe adds a subscriber to the transaction pool. - // Subscribers will receive all consensus set changes as well as - // transaction pool changes, and should not subscribe to both. - TransactionPoolSubscribe(TransactionPoolSubscriber) - - // TransactionSet returns the transaction set the provided object - // appears in. - TransactionSet(types.Hash256) []types.Transaction - - // Unsubscribe removes a subscriber from the transaction pool. - Unsubscribe(TransactionPoolSubscriber) - } -) - -// NewConsensusConflict returns a consensus conflict, which implements the -// error interface. -func NewConsensusConflict(s string) ConsensusConflict { - return ConsensusConflict(consensusConflictPrefix + s) -} - -// Error implements the error interface, turning the consensus conflict into an -// acceptable error type. -func (cc ConsensusConflict) Error() string { - return string(cc) -} - -// IsConsensusConflict returns true if err is a ConsensusConflict. -func IsConsensusConflict(err error) bool { - return strings.HasPrefix(err.Error(), consensusConflictPrefix) -} - -// CalculateFee returns the fee-per-byte of a transaction set. -func CalculateFee(ts []types.Transaction) types.Currency { - var sum types.Currency - for _, t := range ts { - for _, fee := range t.MinerFees { - sum = sum.Add(fee) - } - } - size := types.EncodedLen(ts) - return sum.Div64(uint64(size)) -} diff --git a/modules/transactionpool/accept.go b/modules/transactionpool/accept.go deleted file mode 100644 index ee2d93e..0000000 --- a/modules/transactionpool/accept.go +++ /dev/null @@ -1,419 +0,0 @@ -package transactionpool - -// TODO: It seems like the transaction pool is not properly detecting conflicts -// between a file contract revision and a file contract. 
- -import ( - "errors" - "io" - "math" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -var ( - errEmptySet = errors.New("transaction set is empty") - errLowMinerFees = errors.New("transaction set needs more miner fees to be accepted") -) - -// transactionSet is a helper type to facilitate encoding transaction sets. -type transactionSet []types.Transaction - -// EncodeTo implements types.EncoderTo. -func (ts transactionSet) EncodeTo(e *types.Encoder) { - e.WritePrefix(len(ts)) - for _, tx := range ts { - tx.EncodeTo(e) - } -} - -// relatedObjectIDs determines all of the object ids related to a transaction. -func relatedObjectIDs(ts []types.Transaction) []ObjectID { - oidMap := make(map[ObjectID]struct{}) - for _, t := range ts { - for _, sci := range t.SiacoinInputs { - oidMap[ObjectID(sci.ParentID)] = struct{}{} - } - for i := range t.SiacoinOutputs { - oidMap[ObjectID(t.SiacoinOutputID(i))] = struct{}{} - } - for i := range t.FileContracts { - oidMap[ObjectID(t.FileContractID(i))] = struct{}{} - } - for _, fcr := range t.FileContractRevisions { - oidMap[ObjectID(fcr.ParentID)] = struct{}{} - } - for _, sp := range t.StorageProofs { - oidMap[ObjectID(sp.ParentID)] = struct{}{} - } - for _, sfi := range t.SiafundInputs { - oidMap[ObjectID(sfi.ParentID)] = struct{}{} - } - for i := range t.SiafundOutputs { - oidMap[ObjectID(t.SiafundOutputID(i))] = struct{}{} - } - } - - var oids []ObjectID - for oid := range oidMap { - oids = append(oids, oid) - } - return oids -} - -// requiredFeesToExtendTpoolAtSize returns the fees that should be required to -// extend the transaction pool for a given size of transaction pool. -// -// NOTE: This function ignores the minimum transaction pool size required for a -// fee. -func requiredFeesToExtendTpoolAtSize(size int) types.Currency { - // Calculate the fee required to bump out the size of the transaction pool. 
- ratioToTarget := float64(size) / TransactionPoolSizeTarget - feeFactor := math.Pow(ratioToTarget, TransactionPoolExponentiation) - return modules.FromFloat(feeFactor / 1000) // Divide by 1000 to get SC/kb. -} - -// requiredFeesToExtendTpool returns the amount of fees required to extend the -// transaction pool to fit another transaction set. The amount returned has the -// unit 'currency per byte'. -func (tp *TransactionPool) requiredFeesToExtendTpool() types.Currency { - // If the transaction pool is nearly empty, it can be extended even if there - // are no fees. - if tp.transactionListSize < TransactionPoolSizeForFee { - return types.ZeroCurrency - } - - return requiredFeesToExtendTpoolAtSize(tp.transactionListSize) -} - -// checkTransactionSetComposition checks if the transaction set is valid given -// the state of the pool. It does not check that each individual transaction -// would be legal in the next block, but does check things like miner fees and -// IsStandard. -func (tp *TransactionPool) checkTransactionSetComposition(ts []types.Transaction) (uint64, error) { - // Check that the transaction set is not already known. - h := types.NewHasher() - transactionSet(ts).EncodeTo(h.E) - setID := modules.TransactionSetID(h.Sum()) - _, exists := tp.transactionSets[setID] - if exists { - return 0, modules.ErrDuplicateTransactionSet - } - - // All checks after this are expensive. - // - // TODO: There is no DoS prevention mechanism in place to prevent repeated - // expensive verifications of invalid transactions that are created on the - // fly. - - // Check that all transactions follow the guidelines. - setSize, err := isStandardTransactionSet(ts) - if err != nil { - return 0, err - } - - return setSize, nil -} - -// handleConflicts will return a transaction set which contains all unconfirmed -// transactions which are related (descendent or ancestor) in some way to any of -// the input transaction set. 
-func (tp *TransactionPool) handleConflicts(ts []types.Transaction, conflicts []modules.TransactionSetID, txnFn func([]types.Transaction) (modules.ConsensusChange, error)) ([]types.Transaction, error) { - // Create a list of all the transaction ids that compose the set of - // conflicts. - conflictMap := make(map[types.TransactionID]modules.TransactionSetID) - for _, conflict := range conflicts { - conflictSet := tp.transactionSets[conflict] - for _, conflictTxn := range conflictSet { - conflictMap[conflictTxn.ID()] = conflict - } - } - - // Discard all duplicate transactions from the input transaction set. - var dedupSet []types.Transaction - for _, t := range ts { - _, exists := conflictMap[t.ID()] - if exists { - continue - } - dedupSet = append(dedupSet, t) - } - if len(dedupSet) == 0 { - return nil, modules.ErrDuplicateTransactionSet - } - // If transactions were pruned, it's possible that the set of - // dependencies/conflicts has also reduced. To minimize computational load - // on the consensus set, we want to prune out all of the conflicts that are - // no longer relevant. As an example, consider the transaction set {A}, the - // set {B}, and the new set {A, C}, where C is dependent on B. {A} and {B} - // are both conflicts, but after deduplication {A} is no longer a conflict. - // This is recursive, but it is guaranteed to run only once as the first - // deduplication is guaranteed to be complete. - if len(dedupSet) < len(ts) { - oids := relatedObjectIDs(dedupSet) - var conflicts []modules.TransactionSetID - for _, oid := range oids { - conflict, exists := tp.knownObjects[oid] - if exists { - conflicts = append(conflicts, conflict) - } - } - return tp.handleConflicts(dedupSet, conflicts, txnFn) - } - - // Merge all of the conflict sets with the input set (input set goes last - // to preserve dependency ordering), and see if the set as a whole is both - // small enough to be legal and valid as a set. If no, return an error. 
If - // yes, add the new set to the pool, and eliminate the old set. The output - // diff objects can be repeated, (no need to remove those). Just need to - // remove the conflicts from tp.transactionSets. - var superset []types.Transaction - supersetMap := make(map[modules.TransactionSetID]struct{}) - for _, conflict := range conflictMap { - supersetMap[conflict] = struct{}{} - } - for conflict := range supersetMap { - superset = append(superset, tp.transactionSets[conflict]...) - } - superset = append(superset, dedupSet...) - - // Check the composition of the transaction set, including fees and - // IsStandard rules (this is a new set, the rules must be rechecked). - setSize, err := tp.checkTransactionSetComposition(superset) - if err != nil { - return nil, err - } - - // Check that the transaction set has enough fees to justify adding it to - // the transaction list. - requiredFees := tp.requiredFeesToExtendTpool().Mul64(setSize) - var setFees types.Currency - for _, txn := range superset { - for _, fee := range txn.MinerFees { - setFees = setFees.Add(fee) - } - } - if requiredFees.Cmp(setFees) > 0 { - // TODO: check if there is an existing set with lower fees that we can - // kick out. - return nil, errLowMinerFees - } - - // Check that the transaction set is valid. - cc, err := txnFn(superset) - if err != nil { - return nil, modules.NewConsensusConflict("provided transaction set has prereqs, but is still invalid: " + err.Error()) - } - - // Remove the conflicts from the transaction pool. - for conflict := range supersetMap { - conflictSet := tp.transactionSets[conflict] - tp.transactionListSize -= types.EncodedLen(transactionSet(conflictSet)) - delete(tp.transactionSets, conflict) - delete(tp.transactionSetDiffs, conflict) - } - - // Add the transaction set to the pool. 
- h := types.NewHasher() - transactionSet(superset).EncodeTo(h.E) - setID := modules.TransactionSetID(h.Sum()) - tp.transactionSets[setID] = superset - for _, diff := range cc.SiacoinOutputDiffs { - tp.knownObjects[ObjectID(diff.ID)] = setID - } - for _, diff := range cc.FileContractDiffs { - tp.knownObjects[ObjectID(diff.ID)] = setID - } - for _, diff := range cc.SiafundOutputDiffs { - tp.knownObjects[ObjectID(diff.ID)] = setID - } - tp.transactionSetDiffs[setID] = &cc - tsetSize := types.EncodedLen(transactionSet(superset)) - tp.transactionListSize += tsetSize - for _, txn := range superset { - if _, exists := tp.transactionHeights[txn.ID()]; !exists { - tp.transactionHeights[txn.ID()] = tp.blockHeight - } - } - - return superset, nil -} - -// acceptTransactionSet verifies that a transaction set is allowed to be in the -// transaction pool, and then adds it to the transaction pool. -func (tp *TransactionPool) acceptTransactionSet(ts []types.Transaction, txnFn func([]types.Transaction) (modules.ConsensusChange, error)) (superset []types.Transaction, err error) { - if len(ts) == 0 { - return nil, errEmptySet - } - - // Remove all transactions that have been confirmed in the transaction set. - oldTS := ts - ts = []types.Transaction{} - for _, txn := range oldTS { - if !tp.transactionConfirmed(txn.ID()) { - ts = append(ts, txn) - } - } - // If no transactions remain, return a dublicate error. - if len(ts) == 0 { - return nil, modules.ErrDuplicateTransactionSet - } - - // Check the composition of the transaction set. - setSize, err := tp.checkTransactionSetComposition(ts) - if err != nil { - return nil, err - } - - // Check that the transaction set has enough fees to justify adding it to - // the transaction list. 
- requiredFees := tp.requiredFeesToExtendTpool().Mul64(setSize) - var setFees types.Currency - for _, txn := range ts { - for _, fee := range txn.MinerFees { - setFees = setFees.Add(fee) - } - } - if requiredFees.Cmp(setFees) > 0 { - return nil, errLowMinerFees - } - - // Check for conflicts with other transactions, which would indicate a - // double-spend. Legal children of a transaction set will also trigger the - // conflict-detector. - oids := relatedObjectIDs(ts) - var conflicts []modules.TransactionSetID - for _, oid := range oids { - conflict, exists := tp.knownObjects[oid] - if exists { - conflicts = append(conflicts, conflict) - } - } - if len(conflicts) > 0 { - return tp.handleConflicts(ts, conflicts, txnFn) - } - cc, err := txnFn(ts) - if err != nil { - return nil, modules.NewConsensusConflict("provided transaction set is invalid: " + err.Error()) - } - - // Add the transaction set to the pool. - h := types.NewHasher() - transactionSet(ts).EncodeTo(h.E) - setID := modules.TransactionSetID(h.Sum()) - tp.transactionSets[setID] = ts - for _, oid := range oids { - tp.knownObjects[oid] = setID - } - tp.transactionSetDiffs[setID] = &cc - tsetSize := types.EncodedLen(transactionSet(ts)) - tp.transactionListSize += tsetSize - for _, txn := range ts { - if _, exists := tp.transactionHeights[txn.ID()]; !exists { - tp.transactionHeights[txn.ID()] = tp.blockHeight - } - } - - return ts, nil -} - -// submitTransactionSet will submit a transaction set to the transaction pool -// and return the minimum superset for that transaction set. -func (tp *TransactionPool) submitTransactionSet(ts []types.Transaction) ([]types.Transaction, error) { - // Assert on consensus set to get special method. 
- cs, ok := tp.consensusSet.(interface { - LockedTryTransactionSet(fn func(func(txns []types.Transaction) (modules.ConsensusChange, error)) error) error - }) - if !ok { - return nil, errors.New("consensus set does not support LockedTryTransactionSet method") - } - - var superset []types.Transaction - var acceptErr error - err := cs.LockedTryTransactionSet(func(txnFn func(txns []types.Transaction) (modules.ConsensusChange, error)) error { - tp.outerMu.Lock() - tp.innerMu.Lock() - defer func() { - tp.innerMu.Unlock() - tp.outerMu.Unlock() - }() - - // Attempt to get the transaction set into the transaction pool. - superset, acceptErr = tp.acceptTransactionSet(ts, txnFn) - if acceptErr != nil { - return acceptErr - } - - // Notify subscribers of an accepted transaction set. - tp.updateSubscribersTransactions() - return nil - }) - if err != nil { - return nil, err - } - - // Get the minimum required set from the superset for the input transaction - // set. - minSuperSet := modules.MinimumTransactionSet(ts, superset) - return minSuperSet, nil -} - -// AcceptTransactionSet adds a transaction to the unconfirmed set of -// transactions. If the transaction is accepted, it will be relayed to -// connected peers. -func (tp *TransactionPool) AcceptTransactionSet(ts []types.Transaction) error { - if err := tp.tg.Add(); err != nil { - return err - } - defer tp.tg.Done() - - minSuperSet, err := tp.submitTransactionSet(ts) - if err != nil { - return err - } - - go tp.gateway.Broadcast("RelayTransactionSet", transactionSet(minSuperSet), tp.gateway.Peers()) - return nil -} - -// relayTransactionSet is an RPC that accepts a transaction set from a peer. If -// the accept is successful, the transaction will be relayed to the gateway's -// other peers. -func (tp *TransactionPool) relayTransactionSet(conn modules.PeerConn) error { - if err := tp.tg.Add(); err != nil { - return err - } - defer tp.tg.Done() - - // Connection stability and cleanup code. 
- err := conn.SetDeadline(time.Now().Add(relayTransactionSetTimeout)) - if err != nil { - return err - } - - // Automatically close the channel when tg.Stop() is called. - finishedChan := make(chan struct{}) - defer close(finishedChan) - go func() { - select { - case <-tp.tg.StopChan(): - case <-finishedChan: - } - conn.Close() - }() - - // Read the transaction. - d := types.NewDecoder(io.LimitedReader{R: conn, N: int64(modules.BlockSizeLimit + 8)}) - _ = d.ReadUint64() - ts := make([]types.Transaction, d.ReadPrefix()) - for i := 0; i < len(ts); i++ { - ts[i].DecodeFrom(d) - if err := d.Err(); err != nil { - return err - } - } - return tp.AcceptTransactionSet(ts) -} diff --git a/modules/transactionpool/alert.go b/modules/transactionpool/alert.go deleted file mode 100644 index d11b279..0000000 --- a/modules/transactionpool/alert.go +++ /dev/null @@ -1,8 +0,0 @@ -package transactionpool - -import "github.com/mike76-dev/sia-satellite/modules" - -// Alerts implements the modules.Alerter interface for the transactionpool. -func (tpool *TransactionPool) Alerts() (crit, err, warn, info []modules.Alert) { - return -} diff --git a/modules/transactionpool/consts.go b/modules/transactionpool/consts.go deleted file mode 100644 index 1f049ac..0000000 --- a/modules/transactionpool/consts.go +++ /dev/null @@ -1,74 +0,0 @@ -package transactionpool - -import ( - "time" - - "go.sia.tech/core/types" -) - -// Consts related to the persisting structures of the transactoin pool. -const ( - logFile = "transactionpool.log" -) - -// Constants related to the size and ease-of-entry of the transaction pool. -const ( - // TransactionPoolFeeExponentiation defines the polynomial rate of growth - // required to keep putting transactions into the transaction pool. If the - // exponentiation is 2, then doubling the size of the transaction pool - // requires quadrupling the fees of the transactions being added. 
A higher - // number makes it harder for the transaction pool to grow beyond its - // default size during times of congestion. - TransactionPoolExponentiation = 3 - - // TransactionPoolSizeForFee defines how large the transaction pool needs to - // be before it starts expecting fees to be on the transaction. This initial - // limit is to help the network grow and provide some wiggle room for - // wallets that are not yet able to operate via a fee market. - TransactionPoolSizeForFee = 500e3 - - // TransactionPoolSizeTarget defines the target size of the pool when the - // transactions are paying 1 SC/kb in fees. - TransactionPoolSizeTarget = 3e6 -) - -// Constants related to fee estimation. -const ( - // blockFeeEstimationDepth defines how far backwards in the blockchain the - // fee estimator looks when using blocks to figure out the appropriate fees - // to add to transactions. - blockFeeEstimationDepth = 6 - - // maxMultiplier defines the general gap between the maximum recommended fee - // and the minimum recommended fee. - maxMultiplier = 3 - - // feeEstimationConstantPadding is the constant amount of padding added to - // the current tpool size when estimating a good fee rate for new - // transactions. - feeEstimationConstantPadding = 250e3 - - // feeEstimationProportionalPadding is the amount of proportional padding - // added to the current tpool size when estimating a good fee rate for new - // transactions. - feeEstimationProportionalPadding = 1.25 -) - -// Variables related to the size and ease-of-entry of the transaction pool. -var ( - // minEstimation defines a sane minimum fee per byte for transactions. This - // will typically be only suggested as a fee in the absence of congestion. - minEstimation = types.HastingsPerSiacoin.Div64(100).Div64(1e3) -) - -// Variables related to propagating transactions through the network. -var ( - // relayTransactionSetTimeout establishes the timeout for a relay - // transaction set call. 
- relayTransactionSetTimeout = 3 * time.Minute - - // MaxTransactionAge determines the maximum age of a transaction (in block - // height) allowed before the transaction is pruned from the transaction - // pool. - MaxTransactionAge = uint64(24) -) diff --git a/modules/transactionpool/persist.go b/modules/transactionpool/persist.go deleted file mode 100644 index c9906be..0000000 --- a/modules/transactionpool/persist.go +++ /dev/null @@ -1,211 +0,0 @@ -package transactionpool - -import ( - "fmt" - "path/filepath" - "time" - - "github.com/mike76-dev/sia-satellite/internal/sync" - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" - - "go.sia.tech/core/types" -) - -const tpoolSyncRate = time.Minute * 2 - -// threadedRegularSync will make sure that sync gets called on the database -// every once in a while. -func (tp *TransactionPool) threadedRegularSync() { - if err := tp.tg.Add(); err != nil { - return - } - defer tp.tg.Done() - for { - select { - case <-tp.tg.StopChan(): - // A queued AfterStop will close out the db properly. - return - case <-time.After(tpoolSyncRate): - tp.outerMu.Lock() - tp.innerMu.Lock() - tp.syncDB() - tp.innerMu.Unlock() - tp.outerMu.Unlock() - } - } -} - -// syncDB commits the current global transaction and immediately begins a new -// one. -func (tp *TransactionPool) syncDB() { - // Commit the existing tx. - err := tp.dbTx.Commit() - if err != nil { - tp.log.Severe("ERROR: failed to apply database update:", err) - tp.dbTx.Rollback() - } - // Begin a new tx. - tp.dbTx, err = tp.db.Begin() - if err != nil { - tp.log.Severe("ERROR: failed to initialize a db transaction:", err) - } -} - -// resetDB deletes all consensus related persistence from the transaction pool. 
-func (tp *TransactionPool) resetDB() error { - _, err := tp.dbTx.Exec("DELETE FROM tp_ctx") - if err != nil { - return err - } - err = tp.putRecentBlockID(types.BlockID{}) - if err != nil { - return err - } - err = tp.putRecentConsensusChange(modules.ConsensusChangeBeginning) - if err != nil { - return err - } - return tp.putBlockHeight(0) -} - -// initPersist initializes the database. -func (tp *TransactionPool) initPersist(dir string) error { - // Create the tpool logger. - var err error - tp.log, err = persist.NewFileLogger(filepath.Join(dir, logFile)) - if err != nil { - return modules.AddContext(err, "unable to initialize the transaction pool logger") - } - tp.tg.AfterStop(func() { - err := tp.log.Close() - if err != nil { - fmt.Println("Unable to close the transaction pool logger:", err) - } - }) - - // Create the global tpool tx that will be used for most persist actions. - tp.dbTx, err = tp.db.Begin() - if err != nil { - return modules.AddContext(err, "unable to begin tpool dbTx") - } - tp.tg.AfterStop(func() { - tp.outerMu.Lock() - tp.innerMu.Lock() - err := tp.dbTx.Commit() - tp.innerMu.Unlock() - tp.outerMu.Unlock() - if err != nil { - tp.log.Println("ERROR: unable to close transaction properly during shutdown:", err) - } - }) - - // Spin up the thread that occasionally synchronizes the database. - go tp.threadedRegularSync() - - // Get the recent consensus change. - cc, err := tp.getRecentConsensusChange() - if modules.ContainsError(err, errNilConsensusChange) { - err = tp.putRecentConsensusChange(modules.ConsensusChangeBeginning) - } - if err != nil { - return modules.AddContext(err, "unable to initialize the recent consensus change in the tpool") - } - - // Get the most recent block height. 
- bh, err := tp.getBlockHeight() - if err != nil { - tp.log.Println("INFO: block height is reporting as zero, setting up to subscribe from the beginning.") - err = tp.putBlockHeight(0) - if err != nil { - return modules.AddContext(err, "unable to initialize the block height in the tpool") - } - err = tp.putRecentConsensusChange(modules.ConsensusChangeBeginning) - } else { - tp.blockHeight = bh - } - if err != nil { - return modules.AddContext(err, "unable to initialize the block height in the tpool") - } - - // Get the fee median data. - mp, err := tp.getFeeMedian() - if err != nil && !modules.ContainsError(err, errNilFeeMedian) { - return modules.AddContext(err, "unable to load the fee median") - } - // Just leave the fields empty if no fee median was found. They will be - // filled out. - if !modules.ContainsError(err, errNilFeeMedian) { - tp.recentMedians = mp.RecentMedians - tp.recentMedianFee = mp.RecentMedianFee - } - - // Subscribe to the consensus set using the most recent consensus change. - go func() { - err := tp.consensusSet.ConsensusSetSubscribe(tp, cc, tp.tg.StopChan()) - if modules.ContainsError(err, sync.ErrStopped) { - return - } - if modules.ContainsError(err, modules.ErrInvalidConsensusChangeID) { - tp.log.Println("WARN: invalid consensus change loaded; resetting. This can take a while.") - // Reset and rescan because the consensus set does not recognize the - // provided consensus change id. 
- tp.outerMu.Lock() - tp.innerMu.Lock() - resetErr := tp.resetDB() - tp.innerMu.Unlock() - tp.outerMu.Unlock() - if resetErr != nil { - tp.log.Println("CRITICAL: failed to reset tpool", resetErr) - return - } - freshScanErr := tp.consensusSet.ConsensusSetSubscribe(tp, modules.ConsensusChangeBeginning, tp.tg.StopChan()) - if modules.ContainsError(freshScanErr, sync.ErrStopped) { - return - } - if freshScanErr != nil { - tp.log.Println("CRITICAL: failed to subscribe tpool to consensusset", freshScanErr) - return - } - tp.tg.OnStop(func() { - tp.consensusSet.Unsubscribe(tp) - }) - return - } - if err != nil { - tp.log.Println("CRITICAL:", err) - return - } - }() - tp.tg.OnStop(func() { - tp.consensusSet.Unsubscribe(tp) - }) - return nil -} - -// TransactionConfirmed returns true if the transaction has been seen on the -// blockchain. Note, however, that the block containing the transaction may -// later be invalidated by a reorg. -func (tp *TransactionPool) TransactionConfirmed(id types.TransactionID) (bool, error) { - if err := tp.tg.Add(); err != nil { - return false, modules.AddContext(err, "cannot check transaction status, the transaction pool has closed") - } - defer tp.tg.Done() - tp.outerMu.Lock() - tp.innerMu.Lock() - defer func() { - tp.innerMu.Unlock() - tp.outerMu.Unlock() - }() - return tp.transactionConfirmed(id), nil -} - -func (tp *TransactionPool) transactionConfirmed(id types.TransactionID) bool { - var count int - err := tp.dbTx.QueryRow("SELECT COUNT(*) from tp_ctx WHERE txid = ?", id[:]).Scan(&count) - if err != nil { - tp.log.Println("ERROR: unable to get transaction", err) - return false - } - return count > 0 -} diff --git a/modules/transactionpool/standard.go b/modules/transactionpool/standard.go deleted file mode 100644 index 7ccfc5d..0000000 --- a/modules/transactionpool/standard.go +++ /dev/null @@ -1,140 +0,0 @@ -package transactionpool - -import ( - "errors" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) 
- -// standard.go adds extra rules to transactions which help preserve network -// health and provides flexibility for future soft forks and tweaks to the -// network. -// -// Rule: Transaction size is limited. -// There is a DoS vector where large transactions can both contain many -// signatures, and have each signature's CoveredFields object cover a -// unique but large portion of the transaction. A 1mb transaction could -// force a verifier to hash very large volumes of data, which takes a long -// time on nonspecialized hardware. -// -// Rule: Foreign signature algorithms are rejected. -// There are plans to add newer, faster signature algorithms to Sia as the -// project matures and the need for increased verification speed grows. -// Foreign signatures are allowed into the blockchain, where they are -// accepted as valid. However, if there has been a soft-fork, the foreign -// signatures might actually be invalid. This rule protects legacy miners -// from including potentially invalid transactions in their blocks. -// -// Rule: The types of allowed arbitrary data are limited. -// The arbitrary data field can be used to orchestrate soft-forks to Sia -// that add features. Legacy miners are at risk of creating invalid blocks -// if they include arbitrary data which has meanings that the legacy miner -// doesn't understand. -// -// Rule: The transaction set size is limited. -// A group of dependent transactions cannot exceed 100kb to limit how -// quickly the transaction pool can be filled with new transactions. - -// checkUnlockConditions looks at the UnlockConditions and verifies that all -// public keys are recognized. Unrecognized public keys are automatically -// accepted as valid by the consnensus set, but rejected by the transaction -// pool. This allows new types of keys to be added via a softfork without -// alienating all of the older nodes. 
-func checkUnlockConditions(uc types.UnlockConditions) error { - for _, pk := range uc.PublicKeys { - if pk.Algorithm != types.SpecifierEntropy && - pk.Algorithm != types.SpecifierEd25519 { - return errors.New("unrecognized key type in transaction") - } - } - - return nil -} - -// isStandardTransaction enforces extra rules such as a transaction size limit. -// These rules can be altered without disrupting consensus. -// -// The size of the transaction is returned so that the transaction does not need -// to be encoded multiple times. -func isStandardTransaction(t types.Transaction) (uint64, error) { - // Check that the size of the transaction does not exceed the standard - // established in Standard.md. Larger transactions are a DOS vector, - // because someone can fill a large transaction with a bunch of signatures - // that require hashing the entire transaction. Several hundred megabytes - // of hashing can be required of a verifier. Enforcing this rule makes it - // more difficult for attackers to exploid this DOS vector, though a miner - // with sufficient power could still create unfriendly blocks. - tlen := types.EncodedLen(t) - if tlen > modules.TransactionSizeLimit { - return 0, modules.ErrLargeTransaction - } - - // Check that all public keys are of a recognized type. Need to check all - // of the UnlockConditions, which currently can appear in 3 separate fields - // of the transaction. Unrecognized types are ignored because a softfork - // may make certain unrecognized signatures invalid, and this node cannot - // tell which signatures are the invalid ones. 
- for _, sci := range t.SiacoinInputs { - err := checkUnlockConditions(sci.UnlockConditions) - if err != nil { - return 0, err - } - } - for _, fcr := range t.FileContractRevisions { - err := checkUnlockConditions(fcr.UnlockConditions) - if err != nil { - return 0, err - } - } - for _, sfi := range t.SiafundInputs { - err := checkUnlockConditions(sfi.UnlockConditions) - if err != nil { - return 0, err - } - } - - // Check that all arbitrary data is prefixed using the recognized set of - // prefixes. The allowed prefixes include a 'NonSia' prefix for truly - // arbitrary data. Blocking all other prefixes allows arbitrary data to be - // used to orchestrate more complicated soft forks in the future without - // putting older nodes at risk of violating the new rules. - var prefix types.Specifier - for _, arb := range t.ArbitraryData { - // Check for a whilelisted prefix. - copy(prefix[:], arb) - if prefix == modules.PrefixHostAnnouncement || - prefix == modules.PrefixNonSia || - prefix == modules.PrefixFileContractIdentifier || - prefix == types.SpecifierFoundation { - continue - } - - return 0, modules.ErrInvalidArbPrefix - } - return uint64(tlen), nil -} - -// isStandardTransactionSet checks that all transacitons of a set follow the -// IsStandard guidelines, and that the set as a whole follows the guidelines as -// well. -// -// The size of the transaction set is returned so that the encoding only needs -// to happen once. -func isStandardTransactionSet(ts []types.Transaction) (uint64, error) { - // Check that each transaction is acceptable, while also making sure that - // the size of the whole set is legal. 
- var totalSize uint64 - for i := range ts { - tSize, err := isStandardTransaction(ts[i]) - if err != nil { - return 0, err - } - totalSize += tSize - if totalSize > modules.TransactionSetSizeLimit { - return 0, modules.ErrLargeTransactionSet - } - } - return totalSize, nil -} diff --git a/modules/transactionpool/subscribe.go b/modules/transactionpool/subscribe.go deleted file mode 100644 index 7d1793b..0000000 --- a/modules/transactionpool/subscribe.go +++ /dev/null @@ -1,115 +0,0 @@ -package transactionpool - -import ( - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// updateSubscribersTransactions sends a new transaction pool update to all -// subscribers. -func (tp *TransactionPool) updateSubscribersTransactions() { - diff := new(modules.TransactionPoolDiff) - // Create all of the diffs for reverted sets. - for id := range tp.subscriberSets { - // The transaction set is still in the transaction pool, no need to - // create an update. - _, exists := tp.transactionSets[id] - if exists { - continue - } - - // Report that this set has been removed. Negative diffs don't have all - // fields filled out. - diff.RevertedTransactions = append(diff.RevertedTransactions, modules.TransactionSetID(id)) - } - - // Clear the subscriber sets map. - for _, revert := range diff.RevertedTransactions { - delete(tp.subscriberSets, modules.TransactionSetID(revert)) - } - - // Create all of the diffs for sets that have been recently created. - for id, set := range tp.transactionSets { - _, exists := tp.subscriberSets[id] - if exists { - // The transaction set has already been sent in an update. - continue - } - - // Report that this transaction set is new to the transaction pool. 
- ids := make([]types.TransactionID, 0, len(set)) - sizes := make([]uint64, 0, len(set)) - for i := range set { - encodedLen := types.EncodedLen(set[i]) - sizes = append(sizes, uint64(encodedLen)) - ids = append(ids, set[i].ID()) - } - ut := &modules.UnconfirmedTransactionSet{ - Change: tp.transactionSetDiffs[id], - ID: modules.TransactionSetID(id), - - IDs: ids, - Sizes: sizes, - Transactions: set, - } - // Add this diff to our set of subscriber diffs. - tp.subscriberSets[id] = ut - diff.AppliedTransactions = append(diff.AppliedTransactions, ut) - } - - for _, subscriber := range tp.subscribers { - subscriber.ReceiveUpdatedUnconfirmedTransactions(diff) - } -} - -// TransactionPoolSubscribe adds a subscriber to the transaction pool. -// Subscribers will receive the full transaction set every time there is a -// significant change to the transaction pool. -func (tp *TransactionPool) TransactionPoolSubscribe(subscriber modules.TransactionPoolSubscriber) { - tp.outerMu.Lock() - tp.innerMu.Lock() - defer func() { - tp.innerMu.Unlock() - tp.outerMu.Unlock() - }() - - // Check that this subscriber is not already subscribed. - for _, s := range tp.subscribers { - if s == subscriber { - tp.log.Println("CRITICAL: refusing to double-subscribe subscriber") - } - } - - // Add the subscriber to the subscriber list. - tp.subscribers = append(tp.subscribers, subscriber) - - // Send the new subscriber the transaction pool set. - diff := new(modules.TransactionPoolDiff) - diff.AppliedTransactions = make([]*modules.UnconfirmedTransactionSet, 0, len(tp.subscriberSets)) - for _, ut := range tp.subscriberSets { - diff.AppliedTransactions = append(diff.AppliedTransactions, ut) - } - subscriber.ReceiveUpdatedUnconfirmedTransactions(diff) -} - -// Unsubscribe removes a subscriber from the transaction pool. If the -// subscriber is not in tp.subscribers, Unsubscribe does nothing. 
If the -// subscriber occurs more than once in tp.subscribers, only the earliest -// occurrence is removed (unsubscription fails). -func (tp *TransactionPool) Unsubscribe(subscriber modules.TransactionPoolSubscriber) { - tp.outerMu.Lock() - tp.innerMu.Lock() - defer func() { - tp.innerMu.Unlock() - tp.outerMu.Unlock() - }() - - // Search for and remove subscriber from list of subscribers. - for i := range tp.subscribers { - if tp.subscribers[i] == subscriber { - tp.subscribers = append(tp.subscribers[0:i], tp.subscribers[i+1:]...) - break - } - } -} diff --git a/modules/transactionpool/transactionpool.go b/modules/transactionpool/transactionpool.go deleted file mode 100644 index 81ccc34..0000000 --- a/modules/transactionpool/transactionpool.go +++ /dev/null @@ -1,314 +0,0 @@ -package transactionpool - -import ( - "database/sql" - "errors" - "sync" - - siasync "github.com/mike76-dev/sia-satellite/internal/sync" - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" - - "go.sia.tech/core/types" -) - -var ( - errNilDB = errors.New("transaction pool cannot initialize with a nil database") - errNilCS = errors.New("transaction pool cannot initialize with a nil consensus set") - errNilGateway = errors.New("transaction pool cannot initialize with a nil gateway") -) - -type ( - // ObjectID is the ID of an object such as Siacoin output and file - // contracts, and is used to see if there is are conflicts or overlaps within - // the transaction pool. - ObjectID types.Hash256 - - // The TransactionPool tracks incoming transactions, accepting them or - // rejecting them based on internal criteria such as fees and unconfirmed - // double spends. - TransactionPool struct { - // Dependencies of the transaction pool. 
- consensusSet modules.ConsensusSet - gateway modules.Gateway - - // To prevent double spends in the unconfirmed transaction set, the - // transaction pool keeps a list of all objects that have either been - // created or consumed by the current unconfirmed transaction pool. All - // transactions with overlaps are rejected. This model is - // over-aggressive - one transaction set may create an object that - // another transaction set spends. This is done to minimize the - // computation and memory load on the transaction pool. Dependent - // transactions should be lumped into a single transaction set. - // - // transactionSetDiffs map form a transaction set id to the set of - // diffs that resulted from the transaction set. - knownObjects map[ObjectID]modules.TransactionSetID - subscriberSets map[modules.TransactionSetID]*modules.UnconfirmedTransactionSet - transactionHeights map[types.TransactionID]uint64 - transactionSets map[modules.TransactionSetID][]types.Transaction - transactionSetDiffs map[modules.TransactionSetID]*modules.ConsensusChange - transactionListSize int - - // Variables related to the blockchain. - blockHeight uint64 - recentMedians []types.Currency - recentMedianFee types.Currency // SC per byte. - - // The consensus change index tracks how many consensus changes have - // been sent to the transaction pool. When a new subscriber joins the - // transaction pool, all prior consensus changes are sent to the new - // subscriber. - subscribers []modules.TransactionPoolSubscriber - - // Utilities. - db *sql.DB - dbTx *sql.Tx - log *persist.Logger - outerMu sync.Mutex - innerMu sync.RWMutex - tg siasync.ThreadGroup - } -) - -// Enforce that TransactionPool satisfies the modules.TransactionPool interface. -var _ modules.TransactionPool = (*TransactionPool)(nil) - -// New creates a transaction pool that is ready to receive transactions. 
-func New(db *sql.DB, cs modules.ConsensusSet, g modules.Gateway, dir string) (*TransactionPool, error) { - // Check that the input modules are non-nil. - if db == nil { - return nil, errNilDB - } - if cs == nil { - return nil, errNilCS - } - if g == nil { - return nil, errNilGateway - } - - // Initialize a transaction pool. - tp := &TransactionPool{ - db: db, - consensusSet: cs, - gateway: g, - - knownObjects: make(map[ObjectID]modules.TransactionSetID), - subscriberSets: make(map[modules.TransactionSetID]*modules.UnconfirmedTransactionSet), - transactionHeights: make(map[types.TransactionID]uint64), - transactionSets: make(map[modules.TransactionSetID][]types.Transaction), - transactionSetDiffs: make(map[modules.TransactionSetID]*modules.ConsensusChange), - } - - // Open the tpool database. - err := tp.initPersist(dir) - if err != nil { - return nil, err - } - - // Register RPCs. - g.RegisterRPC("RelayTransactionSet", tp.relayTransactionSet) - tp.tg.OnStop(func() { - tp.gateway.UnregisterRPC("RelayTransactionSet") - }) - - return tp, nil -} - -// Close releases any resources held by the transaction pool, stopping all of -// its worker threads. -func (tp *TransactionPool) Close() error { - return tp.tg.Stop() -} - -// FeeEstimation returns an estimation for what fee should be applied to -// transactions. It returns a minimum and maximum estimated fee per transaction -// byte. -func (tp *TransactionPool) FeeEstimation() (min, max types.Currency) { - err := tp.tg.Add() - if err != nil { - return - } - defer tp.tg.Done() - tp.outerMu.Lock() - tp.innerMu.Lock() - defer func() { - tp.innerMu.Unlock() - tp.outerMu.Unlock() - }() - - // Use three methods to determine an acceptable fee. The first method looks - // at what fee is required to get into a block on the blockchain based on - // the actual fees of transactions confirmed in recent blocks. 
The second - // method looks at the current tpool and performs fee estimation based on - // the other transactions in the tpool. The third method is an absolute - // minimum. - - // First method: use the median fees calculated while looking at - // transactions that have been confirmed in the recent blocks. - feeByBlockchain := tp.recentMedianFee - - // Second method: use the median fees calculated while looking at the - // current size of the transaction pool. For the min fee, use a size that's - // a fixed size larger than the current pool, and then also add some - // proportional padding. The fixed size handles cases where the tpool is - // really small, and a low number of transactions can move the fee - // substantially. The proportional padding is for when the tpool is large - // and there is a lot of activity which is adding to the tpool. - // - // The sizes for proportional and constant are computed independently, and - // then the max is taken of the two. - sizeAfterConstantPadding := tp.transactionListSize + feeEstimationConstantPadding - sizeAfterProportionalPadding := int(float64(tp.transactionListSize) * float64(feeEstimationProportionalPadding)) - var feeByCurrentTpoolSize types.Currency - if sizeAfterConstantPadding > sizeAfterProportionalPadding { - feeByCurrentTpoolSize = requiredFeesToExtendTpoolAtSize(sizeAfterConstantPadding) - } else { - feeByCurrentTpoolSize = requiredFeesToExtendTpoolAtSize(sizeAfterProportionalPadding) - } - - // Pick the larger of the first two methods to be compared with the third - // method. - if feeByBlockchain.Cmp(feeByCurrentTpoolSize) > 0 { - min = feeByBlockchain - } else { - min = feeByCurrentTpoolSize - } - - // Third method: ensure the fee is above an absolute minimum. - if min.Cmp(minEstimation) < 0 { - min = minEstimation - } - max = min.Mul64(maxMultiplier) - return -} - -// TransactionList returns a list of all transactions in the transaction pool. 
-// The transactions are provided in an order that can acceptably be put into a -// block. -func (tp *TransactionPool) TransactionList() []types.Transaction { - tp.outerMu.Lock() - tp.innerMu.Lock() - defer func() { - tp.innerMu.Unlock() - tp.outerMu.Unlock() - }() - - var txns []types.Transaction - for _, tSet := range tp.transactionSets { - txns = append(txns, tSet...) - } - return txns -} - -// Transaction returns the transaction with the provided txid, its parents, and -// a bool indicating if it exists in the transaction pool. -func (tp *TransactionPool) Transaction(id types.TransactionID) (types.Transaction, []types.Transaction, bool) { - tp.outerMu.Lock() - tp.innerMu.Lock() - defer func() { - tp.innerMu.Unlock() - tp.outerMu.Unlock() - }() - - // Find the transaction. - exists := false - var txn types.Transaction - var allParents []types.Transaction - for _, tSet := range tp.transactionSets { - for i, t := range tSet { - if t.ID() == id { - txn = t - allParents = tSet[:i] - exists = true - break - } - } - } - - // prune unneeded parents - parentIDs := make(map[ObjectID]struct{}) - addOutputIDs := func(txn types.Transaction) { - for _, input := range txn.SiacoinInputs { - parentIDs[ObjectID(input.ParentID)] = struct{}{} - } - for _, fcr := range txn.FileContractRevisions { - parentIDs[ObjectID(fcr.ParentID)] = struct{}{} - } - for _, input := range txn.SiafundInputs { - parentIDs[ObjectID(input.ParentID)] = struct{}{} - } - for _, proof := range txn.StorageProofs { - parentIDs[ObjectID(proof.ParentID)] = struct{}{} - } - for _, sig := range txn.Signatures { - parentIDs[ObjectID(sig.ParentID)] = struct{}{} - } - } - isParent := func(t types.Transaction) bool { - for i := range t.SiacoinOutputs { - if _, exists := parentIDs[ObjectID(t.SiacoinOutputID(i))]; exists { - return true - } - } - for i := range t.FileContracts { - if _, exists := parentIDs[ObjectID(t.SiacoinOutputID(i))]; exists { - return true - } - } - for i := range t.SiafundOutputs { - if _, 
exists := parentIDs[ObjectID(t.SiacoinOutputID(i))]; exists { - return true - } - } - return false - } - - addOutputIDs(txn) - var necessaryParents []types.Transaction - for i := len(allParents) - 1; i >= 0; i-- { - parent := allParents[i] - - if isParent(parent) { - necessaryParents = append([]types.Transaction{parent}, necessaryParents...) - addOutputIDs(parent) - } - } - - return txn, necessaryParents, exists -} - -// Transactions returns the transactions of the transaction pool. -func (tp *TransactionPool) Transactions() []types.Transaction { - tp.innerMu.RLock() - defer tp.innerMu.RUnlock() - var txns []types.Transaction - for _, set := range tp.transactionSets { - txns = append(txns, set...) - } - return txns -} - -// TransactionSet returns the transaction set the provided object appears in. -func (tp *TransactionPool) TransactionSet(oid types.Hash256) []types.Transaction { - tp.innerMu.RLock() - defer tp.innerMu.RUnlock() - // Define txns as to not use the memory that stores the actual map - var txns []types.Transaction - tSetID, exists := tp.knownObjects[ObjectID(oid)] - if !exists { - return nil - } - tSet, exists := tp.transactionSets[tSetID] - if !exists { - return nil - } - txns = append(txns, tSet...) - return txns -} - -// Broadcast broadcasts a transaction set to all of the transaction pool's -// peers. -func (tp *TransactionPool) Broadcast(ts []types.Transaction) { - go tp.gateway.Broadcast("RelayTransactionSet", transactionSet(ts), tp.gateway.Peers()) -} diff --git a/modules/transactionpool/update.go b/modules/transactionpool/update.go deleted file mode 100644 index 98e3dfa..0000000 --- a/modules/transactionpool/update.go +++ /dev/null @@ -1,424 +0,0 @@ -package transactionpool - -import ( - "fmt" - "sort" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// findSets takes a bunch of transactions (presumably from a block) and finds -// all of the separate transaction sets within it. 
Set does not check for -// conflicts. -// -// The algorithm goes through one transaction at a time. All of the outputs of -// that transaction are added to the objMap, pointing to the transaction to -// indicate that the transaction contains those outputs. The transaction is -// assigned an integer id (each transaction will have a unique id) and added to -// the txMap. -// -// The transaction's inputs are then checked against the objMap to see if there -// are any parents of the transaction in the graph. If there are, the -// transaction is added to the parent set instead of its own set. If not, the -// transaction is added as its own set. -// -// The forwards map contains a list of ints indicating when a transaction has -// been merged with a set. When a transaction gets merged with a parent set, its -// integer id gets added to the forwards map, indicating that the transaction is -// no longer in its own set, but instead has been merged with other sets. -// -// Some transactions will have parents from multiple distinct sets. If a -// transaction has parents in multiple distinct sets, those sets get merged -// together and the transaction gets added to the result. One of the sets is -// nominated (arbitrarily) as the official set, and the integer id of the other -// set and the new transaction get forwarded to the official set. -// -// TODO: Set merging currently occurs any time that there is a child. But -// really, it should only occur if the child increases the average fee value of -// the set that it is merging with (which it will if and only if it has a higher -// average fee than that set). If the child has multiple parent sets, it should -// be compared with the parent set that has the lowest fee value. Then, after it -// is merged with that parent, the result should be merged with the next -// lowest-fee parent set if and only if the new set has a higher average fee -// than the parent set. 
And this continues until either all of the sets have -// been merged, or until the remaining parent sets have higher values. -func findSets(ts []types.Transaction) [][]types.Transaction { - // txMap marks what set each transaction is in. If two sets get combined, - // this number will not be updated. The 'forwards' map defined further on - // will help to discover which sets have been combined. - txMap := make(map[types.TransactionID]int) - setMap := make(map[int][]types.Transaction) - objMap := make(map[ObjectID]types.TransactionID) - forwards := make(map[int]int) - - // Define a function to follow and collapse any update chain. - forward := func(prev int) (ret int) { - ret = prev - next, exists := forwards[prev] - for exists { - ret = next - forwards[prev] = next // Collapse the forwards function to prevent quadratic runtime of findSets. - next, exists = forwards[next] - } - return ret - } - - // Add the transactions to the setup one-by-one, merging them as they belong - // to a set. - for i, t := range ts { - // Check if the inputs depend on any previous transaction outputs. - tid := t.ID() - parentSets := make(map[int]struct{}) - for _, obj := range t.SiacoinInputs { - txid, exists := objMap[ObjectID(obj.ParentID)] - if exists { - parentSet := forward(txMap[txid]) - parentSets[parentSet] = struct{}{} - } - } - for _, obj := range t.FileContractRevisions { - txid, exists := objMap[ObjectID(obj.ParentID)] - if exists { - parentSet := forward(txMap[txid]) - parentSets[parentSet] = struct{}{} - } - } - for _, obj := range t.StorageProofs { - txid, exists := objMap[ObjectID(obj.ParentID)] - if exists { - parentSet := forward(txMap[txid]) - parentSets[parentSet] = struct{}{} - } - } - for _, obj := range t.SiafundInputs { - txid, exists := objMap[ObjectID(obj.ParentID)] - if exists { - parentSet := forward(txMap[txid]) - parentSets[parentSet] = struct{}{} - } - } - - // Determine the new counter for this transaction. - if len(parentSets) == 0 { - // No parent sets. 
Make a new set for this transaction. - txMap[tid] = i - setMap[i] = []types.Transaction{t} - // Don't need to add anything for the file contract outputs, storage - // proof outputs, Siafund claim outputs; these outputs are not - // allowed to be spent until 50 confirmations. - } else { - // There are parent sets, pick one as the base and then merge the - // rest into it. - parentsSlice := make([]int, 0, len(parentSets)) - for j := range parentSets { - parentsSlice = append(parentsSlice, j) - } - base := parentsSlice[0] - txMap[tid] = base - for _, j := range parentsSlice[1:] { - // Forward any future transactions pointing at this set to the - // base set. - forwards[j] = base - // Combine the transactions in this set with the transactions in - // the base set. - setMap[base] = append(setMap[base], setMap[j]...) - // Delete this set map, it has been merged with the base set. - delete(setMap, j) - } - // Add this transaction to the base set. - setMap[base] = append(setMap[base], t) - } - - // Mark this transaction's outputs as potential inputs to future - // transactions. - for j := range t.SiacoinOutputs { - scoid := t.SiacoinOutputID(j) - objMap[ObjectID(scoid)] = tid - } - for j := range t.FileContracts { - fcid := t.FileContractID(j) - objMap[ObjectID(fcid)] = tid - } - for j := range t.FileContractRevisions { - fcid := t.FileContractRevisions[j].ParentID - objMap[ObjectID(fcid)] = tid - } - for j := range t.SiafundOutputs { - sfoid := t.SiafundOutputID(j) - objMap[ObjectID(sfoid)] = tid - } - } - - // Compile the final group of sets. - ret := make([][]types.Transaction, 0, len(setMap)) - for _, set := range setMap { - ret = append(ret, set) - } - return ret -} - -// purge removes all transactions from the transaction pool. 
-func (tp *TransactionPool) purge() { - tp.knownObjects = make(map[ObjectID]modules.TransactionSetID) - tp.transactionSets = make(map[modules.TransactionSetID][]types.Transaction) - tp.transactionSetDiffs = make(map[modules.TransactionSetID]*modules.ConsensusChange) - tp.transactionHeights = make(map[types.TransactionID]uint64) - tp.transactionListSize = 0 -} - -// ProcessConsensusChange gets called to inform the transaction pool of changes -// to the consensus set. -func (tp *TransactionPool) ProcessConsensusChange(cc modules.ConsensusChange) { - tp.outerMu.Lock() - tp.innerMu.Lock() - - // Get the recent block ID for a sanity check that the consensus change is - // being provided to us correctly. - resetSanityCheck := false - recentID, err := tp.getRecentBlockID() - if modules.ContainsError(err, errNilRecentBlock) || cc.BlockHeight <= tp.blockHeight { - // This almost certainly means that the database hasn't been initialized - // yet with a recent block. - resetSanityCheck = true - } else if err != nil { - tp.log.Println("CRITICAL: could not access recentID from tpool:", err) - } - - // Update the database of confirmed transactions. - for _, block := range cc.RevertedBlocks { - // Sanity check - the id of each reverted block should match the recent - // parent id. - if block.ID() != recentID && !resetSanityCheck { - panic(fmt.Sprintf("Consensus change series appears to be inconsistent - we are reverting the wrong block. bid: %v recent: %v", block.ID(), recentID)) - } - recentID = block.ParentID - - for _, txn := range block.Transactions { - err := tp.deleteTransaction(txn.ID()) - if err != nil { - tp.log.Println("ERROR: could not delete a transaction:", err) - } - } - - // Pull the transactions out of the fee summary. For estimating only - // over 10 blocks, it is extremely likely that there will be more - // applied blocks than reverted blocks, and if there aren't (a height - // decreasing reorg), there will be more than 10 applied blocks. 
- if len(tp.recentMedians) > 0 { - // Strip out all of the transactions in this block. - tp.recentMedians = tp.recentMedians[:len(tp.recentMedians)-1] - } - } - - for _, block := range cc.AppliedBlocks { - // Sanity check - the parent id of each block should match the current - // block id. - if block.ParentID != recentID && !resetSanityCheck { - panic(fmt.Sprintf("Consensus change series appears to be inconsistent - we are applying the wrong block. pid: %v recent: %v", block.ParentID, recentID)) - } - recentID = block.ID() - - for _, txn := range block.Transactions { - err := tp.putTransaction(txn.ID()) - if err != nil { - tp.log.Println("ERROR: could not add a transaction:", err) - } - } - - // Find the median transaction fee for this block. - type feeSummary struct { - fee types.Currency - size int - } - var fees []feeSummary - var totalSize int - txnSets := findSets(block.Transactions) - for _, set := range txnSets { - // Compile the fees for this set. - var feeSum types.Currency - var sizeSum int - for _, txn := range set { - sizeSum += types.EncodedLen(txn) - for _, fee := range txn.MinerFees { - feeSum = feeSum.Add(fee) - } - } - feeAvg := feeSum.Div64(uint64(sizeSum)) - fees = append(fees, feeSummary{ - fee: feeAvg, - size: sizeSum, - }) - totalSize += sizeSum - } - // Add an extra zero-fee tranasction for any unused block space. - remaining := int(modules.BlockSizeLimit) - totalSize - fees = append(fees, feeSummary{ - fee: types.ZeroCurrency, - size: remaining, // Fine if remaining is zero. - }) - // Sort the fees by value and then scroll until the median. - sort.Slice(fees, func(i, j int) bool { - return fees[i].fee.Cmp(fees[j].fee) < 0 - }) - var progress int - for i := range fees { - progress += fees[i].size - // Instead of grabbing the full median, look at the 75%-ile. It's - // going to be cheaper than the 50%-ile, but it still got into a - // block. 
- if uint64(progress) > modules.BlockSizeLimit/4 { - tp.recentMedians = append(tp.recentMedians, fees[i].fee) - break - } - } - - // If there are more than 10 blocks recorded in the txnsPerBlock, strip - // off the oldest blocks. - for len(tp.recentMedians) > blockFeeEstimationDepth { - tp.recentMedians = tp.recentMedians[1:] - } - } - // Grab the median of the recent medians. Copy to a new slice so the sorting - // doesn't screw up the slice. - safeMedians := make([]types.Currency, len(tp.recentMedians)) - copy(safeMedians, tp.recentMedians) - sort.Slice(safeMedians, func(i, j int) bool { - return safeMedians[i].Cmp(safeMedians[j]) < 0 - }) - tp.recentMedianFee = safeMedians[len(safeMedians)/2] - - // Update all the on-disk structures. - tp.blockHeight = cc.BlockHeight - err = tp.putRecentConsensusChange(cc.ID) - if err != nil { - tp.log.Println("ERROR: could not update the recent consensus change:", err) - } - err = tp.putRecentBlockID(recentID) - if err != nil { - tp.log.Println("ERROR: could not store recent block id:", err) - } - err = tp.putBlockHeight(tp.blockHeight) - if err != nil { - tp.log.Println("ERROR: could not update the block height:", err) - } - err = tp.putFeeMedian(medianPersist{ - RecentMedians: tp.recentMedians, - RecentMedianFee: tp.recentMedianFee, - }) - if err != nil { - tp.log.Println("ERROR: could not update the transaction pool median fee information:", err) - } - - // Scan the applied blocks for transactions that got accepted. This will - // help to determine which transactions to remove from the transaction - // pool. Having this list enables both efficiency improvements and helps to - // clean out transactions with no dependencies, such as arbitrary data - // transactions from the host. - txids := make(map[types.TransactionID]struct{}) - for _, block := range cc.AppliedBlocks { - for _, txn := range block.Transactions { - txids[txn.ID()] = struct{}{} - } - } - - // Save all of the current unconfirmed transaction sets into a list. 
- var unconfirmedSets [][]types.Transaction - for _, tSet := range tp.transactionSets { - // Compile a new transaction set the removes all transactions duplicated - // in the block. Though mostly handled by the dependency manager in the - // transaction pool, this should both improve efficiency and will strip - // out duplicate transactions with no dependencies (arbitrary data only - // transactions). - var newTSet []types.Transaction - for _, txn := range tSet { - _, exists := txids[txn.ID()] - if !exists { - newTSet = append(newTSet, txn) - } - } - unconfirmedSets = append(unconfirmedSets, newTSet) - } - // Save all of the old transaction heights. - oldHeights := tp.transactionHeights - - // Purge the transaction pool. Some of the transactions sets may be invalid - // after the consensus change. - tp.purge() - - // Prune transaction sets where all transactions have hit the max - // transaction age. - for i, tSet := range unconfirmedSets { - // Check whether all transactions in this transaction set are old. - old := true - for _, txn := range tSet { - seenHeight, seen := oldHeights[txn.ID()] - if !seen { - // If the transaction hasn't been seen before, add it to the set - // of seen transactions. - tp.transactionHeights[txn.ID()] = tp.blockHeight - 1 - tp.log.Println("CRITICAL: transaction found in tpool which did not have its height recorded") - } - if tp.blockHeight-seenHeight < MaxTransactionAge || !seen { - old = false - break - } - } - - // All of the transactions in this set are old, this set should be - // evicted. - if old { - unconfirmedSets[i] = []types.Transaction{} - for _, txn := range tSet { - delete(tp.transactionHeights, txn.ID()) - } - } - } - - // Scan through the reverted blocks and re-add any transactions that got - // reverted to the tpool. 
- for i := len(cc.RevertedBlocks) - 1; i >= 0; i-- { - block := cc.RevertedBlocks[i] - for _, txn := range block.Transactions { - // Check whether this transaction has already be re-added to the - // consensus set by the applied blocks. - _, exists := txids[txn.ID()] - if exists { - continue - } - - // Try adding the transaction back into the transaction pool. - tp.acceptTransactionSet([]types.Transaction{txn}, cc.TryTransactionSet) // Error is ignored. - } - } - - // Add all of the unconfirmed transaction sets back to the transaction - // pool. The ones that are invalid will throw an error and will not be - // re-added. - // - // Accepting a transaction set requires locking the consensus set (to check - // validity). But, ProcessConsensusChange is only called when the consensus - // set is already locked, causing a deadlock problem. Therefore, - // transactions are readded to the pool in a goroutine, so that this - // function can finish and consensus can unlock. The tpool lock is held - // however until the goroutine completes. - // - // Which means that no other modules can require a tpool lock when - // processing consensus changes. Overall, the locking is pretty fragile and - // more rules need to be put in place. - for _, set := range unconfirmedSets { - for _, txn := range set { - tp.acceptTransactionSet([]types.Transaction{txn}, cc.TryTransactionSet) // Error is ignored. - // acceptTransactionSet will set the transaction height to the - // current height because of the purge mechanism. Reset the height - // to the original height before the purge. - tp.transactionHeights[txn.ID()] = oldHeights[txn.ID()] - } - } - - // Inform subscribers that an update has executed. 
- tp.innerMu.Unlock() - tp.updateSubscribersTransactions() - tp.outerMu.Unlock() -} diff --git a/modules/transactions.go b/modules/transactions.go deleted file mode 100644 index a2fc929..0000000 --- a/modules/transactions.go +++ /dev/null @@ -1,912 +0,0 @@ -package modules - -import ( - "bytes" - "errors" - "io" - - "go.sia.tech/core/types" -) - -var ( - // ErrInvalidSignature is returned if a signature is provided that does not - // match the data and public key. - ErrInvalidSignature = errors.New("invalid signature") - // ErrEntropyKey is the error when a transaction tries to sign an entropy - // public key. - ErrEntropyKey = errors.New("transaction tries to sign an entropy public key") - // ErrFrivolousSignature is the error when a transaction contains a frivolous - // signature. - ErrFrivolousSignature = errors.New("transaction contains a frivolous signature") - // ErrInvalidPubKeyIndex is the error when a transaction contains a signature - // that points to a nonexistent public key. - ErrInvalidPubKeyIndex = errors.New("transaction contains a signature that points to a nonexistent public key") - // ErrInvalidUnlockHashChecksum is the error when the provided unlock hash has - // an invalid checksum. - ErrInvalidUnlockHashChecksum = errors.New("provided unlock hash has an invalid checksum") - // ErrMissingSignatures is the error when a transaction has inputs with missing - // signatures. - ErrMissingSignatures = errors.New("transaction has inputs with missing signatures") - // ErrPrematureSignature is the error when the timelock on signature has not - // expired. - ErrPrematureSignature = errors.New("timelock on signature has not expired") - // ErrPublicKeyOveruse is the error when public key was used multiple times while - // signing transaction. - ErrPublicKeyOveruse = errors.New("public key was used multiple times while signing transaction") - // ErrSortedUniqueViolation is the error when a sorted unique violation occurs. 
- ErrSortedUniqueViolation = errors.New("sorted unique violation") - // ErrUnlockHashWrongLen is the error when a marshalled unlock hash is the wrong - // length. - ErrUnlockHashWrongLen = errors.New("marshalled unlock hash is the wrong length") - // ErrWholeTransactionViolation is the error when there's a covered fields violation. - ErrWholeTransactionViolation = errors.New("covered fields violation") - // ErrDoubleSpend is an error when a transaction uses a parent object - // twice. - ErrDoubleSpend = errors.New("transaction uses a parent object twice") - // ErrFileContractOutputSumViolation is an error when a file contract - // has invalid output sums. - ErrFileContractOutputSumViolation = errors.New("file contract has invalid output sums") - // ErrFileContractWindowEndViolation is an error when a file contract - // window must end at least one block after it starts. - ErrFileContractWindowEndViolation = errors.New("file contract window must end at least one block after it starts") - // ErrFileContractWindowStartViolation is an error when a file contract - // window must start in the future. - ErrFileContractWindowStartViolation = errors.New("file contract window must start in the future") - // ErrNonZeroClaimStart is an error when a transaction has a siafund - // output with a non-zero Siafund claim. - ErrNonZeroClaimStart = errors.New("transaction has a Siafund output with a non-zero Siafund claim") - // ErrNonZeroRevision is an error when a new file contract has a - // nonzero revision number. - ErrNonZeroRevision = errors.New("new file contract has a nonzero revision number") - // ErrStorageProofWithOutputs is an error when a transaction has both - // a storage proof and other outputs. - ErrStorageProofWithOutputs = errors.New("transaction has both a storage proof and other outputs") - // ErrTimelockNotSatisfied is an error when a timelock has not been met. 
- ErrTimelockNotSatisfied = errors.New("timelock has not been met") - // ErrTransactionTooLarge is an error when a transaction is too large - // to fit in a block. - ErrTransactionTooLarge = errors.New("transaction is too large to fit in a block") - // ErrZeroMinerFee is an error when a transaction has a zero value miner - // fee. - ErrZeroMinerFee = errors.New("transaction has a zero value miner fee") - // ErrZeroOutput is an error when a transaction cannot have an output - // or payout that has zero value. - ErrZeroOutput = errors.New("transaction cannot have an output or payout that has zero value") - // ErrZeroRevision is an error when a transaction has a file contract - // revision with RevisionNumber=0. - ErrZeroRevision = errors.New("transaction has a file contract revision with RevisionNumber=0") - // ErrInvalidFoundationUpdateEncoding is returned when a transaction - // contains an improperly-encoded FoundationUnlockHashUpdate. - ErrInvalidFoundationUpdateEncoding = errors.New("transaction contains an improperly-encoded FoundationUnlockHashUpdate") - // ErrUninitializedFoundationUpdate is returned when a transaction contains - // an uninitialized FoundationUnlockHashUpdate. To prevent accidental - // misuse, updates cannot set the Foundation addresses to the empty ("void") - // UnlockHash. - ErrUninitializedFoundationUpdate = errors.New("transaction contains an uninitialized FoundationUnlockHashUpdate") - - // These Specifiers enumerate the types of signatures that are recognized - // by this implementation. If a signature's type is unrecognized, the - // signature is treated as valid. Signatures using the special "entropy" - // type are always treated as invalid; see Consensus.md for more details. 
- - // SignatureEd25519 is a specifier for Ed22519 - SignatureEd25519 = types.NewSpecifier("ed25519") - // SignatureEntropy is a specifier for entropy - SignatureEntropy = types.NewSpecifier("entropy") -) - -// SiacoinOutputSum returns the sum of all the Siacoin outputs in the -// transaction, which must match the sum of all the Siacoin inputs. Siacoin -// outputs created by storage proofs and Siafund outputs are not considered, as -// they were considered when the contract responsible for funding them was -// created. -func SiacoinOutputSum(t types.Transaction) (sum types.Currency) { - // Add the siacoin outputs. - for _, sco := range t.SiacoinOutputs { - sum = sum.Add(sco.Value) - } - - // Add the file contract payouts. - for _, fc := range t.FileContracts { - sum = sum.Add(fc.Payout) - } - - // Add the miner fees. - for _, fee := range t.MinerFees { - sum = sum.Add(fee) - } - - return -} - -// correctFileContracts checks that the file contracts adhere to the file -// contract rules. -func correctFileContracts(t types.Transaction, currentHeight uint64) error { - // Check that FileContract rules are being followed. - for _, fc := range t.FileContracts { - // Check that start and expiration are reasonable values. - if fc.WindowStart <= currentHeight { - return ErrFileContractWindowStartViolation - } - if fc.WindowEnd <= fc.WindowStart { - return ErrFileContractWindowEndViolation - } - - // Check that the proof outputs sum to the payout after the - // Siafund fee has been applied. 
- var validProofOutputSum, missedProofOutputSum types.Currency - for _, output := range fc.ValidProofOutputs { - validProofOutputSum = validProofOutputSum.Add(output.Value) - } - for _, output := range fc.MissedProofOutputs { - missedProofOutputSum = missedProofOutputSum.Add(output.Value) - } - outputPortion := PostTax(currentHeight, fc.Payout) - if validProofOutputSum.Cmp(outputPortion) != 0 { - return ErrFileContractOutputSumViolation - } - if missedProofOutputSum.Cmp(outputPortion) != 0 { - return ErrFileContractOutputSumViolation - } - } - return nil -} - -// correctFileContractRevisions checks that any file contract revisions adhere -// to the revision rules. -func correctFileContractRevisions(t types.Transaction, currentHeight uint64) error { - for _, fcr := range t.FileContractRevisions { - // Check that start and expiration are reasonable values. - if fcr.WindowStart <= currentHeight { - return ErrFileContractWindowStartViolation - } - if fcr.WindowEnd <= fcr.WindowStart { - return ErrFileContractWindowEndViolation - } - - // Check that the valid outputs and missed outputs sum to the same - // value. - var validProofOutputSum, missedProofOutputSum types.Currency - for _, output := range fcr.ValidProofOutputs { - validProofOutputSum = validProofOutputSum.Add(output.Value) - } - for _, output := range fcr.MissedProofOutputs { - missedProofOutputSum = missedProofOutputSum.Add(output.Value) - } - if validProofOutputSum.Cmp(missedProofOutputSum) != 0 { - return ErrFileContractOutputSumViolation - } - } - return nil -} - -// correctArbitraryData checks that any consensus-recognized ArbitraryData -// values are correctly encoded. 
-func correctArbitraryData(t types.Transaction, currentHeight uint64) error { - if currentHeight < FoundationHardforkHeight { - return nil - } - for _, arb := range t.ArbitraryData { - if bytes.HasPrefix(arb, types.SpecifierFoundation[:]) { - var update types.FoundationAddressUpdate - buf := bytes.NewBuffer(arb[16:]) - d := types.NewDecoder(io.LimitedReader{R: buf, N: int64(len(arb) - 16)}) - update.DecodeFrom(d) - if err := d.Err(); err != nil { - return ErrInvalidFoundationUpdateEncoding - } else if update.NewPrimary == (types.Address{}) || update.NewFailsafe == (types.Address{}) { - return ErrUninitializedFoundationUpdate - } - } - } - return nil -} - -// fitsInABlock checks if the transaction is likely to fit in a block. After -// OakHardforkHeight, transactions must be smaller than 64 KiB. -func fitsInABlock(t types.Transaction, currentHeight uint64) error { - // Check that the transaction will fit inside of a block, leaving 5kb for - // overhead. - size := uint64(types.EncodedLen(t)) - if size > BlockSizeLimit - 5e3 { - return ErrTransactionTooLarge - } - if currentHeight >= OakHardforkBlock { - if size > OakHardforkTxnSizeLimit { - return ErrTransactionTooLarge - } - } - return nil -} - -// followsMinimumValues checks that all outputs adhere to the rules for the -// minimum allowed value (generally 1). -func followsMinimumValues(t types.Transaction) error { - for _, sco := range t.SiacoinOutputs { - if sco.Value.IsZero() { - return ErrZeroOutput - } - } - for _, fc := range t.FileContracts { - if fc.Payout.IsZero() { - return ErrZeroOutput - } - } - for _, sfo := range t.SiafundOutputs { - if sfo.Value == 0 { - return ErrZeroOutput - } - } - for _, fee := range t.MinerFees { - if fee.IsZero() { - return ErrZeroMinerFee - } - } - return nil -} - -// followsStorageProofRules checks that a transaction follows the limitations -// placed on transactions that have storage proofs. 
-func followsStorageProofRules(t types.Transaction) error { - // No storage proofs, no problems. - if len(t.StorageProofs) == 0 { - return nil - } - - // If there are storage proofs, there can be no Siacoin outputs, Siafund - // outputs, new file contracts, or file contract terminations. These - // restrictions are in place because a storage proof can be invalidated by - // a simple reorg, which will also invalidate the rest of the transaction. - // These restrictions minimize blockchain turbulence. These other types - // cannot be invalidated by a simple reorg, and must instead by replaced by - // a conflicting transaction. - if len(t.SiacoinOutputs) != 0 { - return ErrStorageProofWithOutputs - } - if len(t.FileContracts) != 0 { - return ErrStorageProofWithOutputs - } - if len(t.FileContractRevisions) != 0 { - return ErrStorageProofWithOutputs - } - if len(t.SiafundOutputs) != 0 { - return ErrStorageProofWithOutputs - } - - return nil -} - -// noRepeats checks that a transaction does not spend multiple outputs twice, -// submit two valid storage proofs for the same file contract, etc. We -// frivolously check that a file contract termination and storage proof don't -// act on the same file contract. There is very little overhead for doing so, -// and the check is only frivolous because of the current rule that file -// contract terminations are not valid after the proof window opens. -func noRepeats(t types.Transaction) error { - // Check that there are no repeat instances of Siacoin outputs, storage - // proofs, contract terminations, or Siafund outputs. 
- siacoinInputs := make(map[types.SiacoinOutputID]struct{}) - for _, sci := range t.SiacoinInputs { - _, exists := siacoinInputs[sci.ParentID] - if exists { - return ErrDoubleSpend - } - siacoinInputs[sci.ParentID] = struct{}{} - } - doneFileContracts := make(map[types.FileContractID]struct{}) - for _, sp := range t.StorageProofs { - _, exists := doneFileContracts[sp.ParentID] - if exists { - return ErrDoubleSpend - } - doneFileContracts[sp.ParentID] = struct{}{} - } - for _, fcr := range t.FileContractRevisions { - _, exists := doneFileContracts[fcr.ParentID] - if exists { - return ErrDoubleSpend - } - doneFileContracts[fcr.ParentID] = struct{}{} - } - siafundInputs := make(map[types.SiafundOutputID]struct{}) - for _, sfi := range t.SiafundInputs { - _, exists := siafundInputs[sfi.ParentID] - if exists { - return ErrDoubleSpend - } - siafundInputs[sfi.ParentID] = struct{}{} - } - return nil -} - -// validUC checks that the conditions of uc have been met. The height is taken -// as input so that modules who might be at a different height can do the -// verification without needing to use their own function. Additionally, it -// means that the function does not need to be a method of the consensus set. -func validUC(uc types.UnlockConditions, currentHeight uint64) (err error) { - if uc.Timelock > currentHeight { - return ErrTimelockNotSatisfied - } - return -} - -// validUnlockConditions checks that all of the unlock conditions in the -// transaction are valid. 
-func validUnlockConditions(t types.Transaction, currentHeight uint64) (err error) { - for _, sci := range t.SiacoinInputs { - err = validUC(sci.UnlockConditions, currentHeight) - if err != nil { - return - } - } - for _, fcr := range t.FileContractRevisions { - err = validUC(fcr.UnlockConditions, currentHeight) - if err != nil { - return - } - } - for _, sfi := range t.SiafundInputs { - err = validUC(sfi.UnlockConditions, currentHeight) - if err != nil { - return - } - } - return -} - -// sortedUnique checks that 'elems' is sorted, contains no repeats, and that no -// element is larger than or equal to 'max'. -func sortedUnique(elems []uint64, max int) bool { - if len(elems) == 0 { - return true - } - - biggest := elems[0] - for _, elem := range elems[1:] { - if elem <= biggest { - return false - } - biggest = elem - } - if biggest >= uint64(max) { - return false - } - return true -} - -// validCoveredFields makes sure that all covered fields objects in the -// signatures follow the rules. This means that if 'WholeTransaction' is set to -// true, all fields except for 'Signatures' must be empty. All fields must be -// sorted numerically, and there can be no repeats. -func validCoveredFields(t types.Transaction) error { - for _, sig := range t.Signatures { - // Convenience variables. - cf := sig.CoveredFields - fieldMaxs := []struct { - field []uint64 - max int - }{ - {cf.SiacoinInputs, len(t.SiacoinInputs)}, - {cf.SiacoinOutputs, len(t.SiacoinOutputs)}, - {cf.FileContracts, len(t.FileContracts)}, - {cf.FileContractRevisions, len(t.FileContractRevisions)}, - {cf.StorageProofs, len(t.StorageProofs)}, - {cf.SiafundInputs, len(t.SiafundInputs)}, - {cf.SiafundOutputs, len(t.SiafundOutputs)}, - {cf.MinerFees, len(t.MinerFees)}, - {cf.ArbitraryData, len(t.ArbitraryData)}, - {cf.Signatures, len(t.Signatures)}, - } - - if cf.WholeTransaction { - // If WholeTransaction is set, all fields must be - // empty, except TransactionSignatures. 
- for _, fieldMax := range fieldMaxs[:len(fieldMaxs) - 1] { - if len(fieldMax.field) != 0 { - return ErrWholeTransactionViolation - } - } - } else { - // If WholeTransaction is not set, at least one field - // must be non-empty. - allEmpty := true - for _, fieldMax := range fieldMaxs { - if len(fieldMax.field) != 0 { - allEmpty = false - break - } - } - if allEmpty { - return ErrWholeTransactionViolation - } - } - - // Check that all fields are sorted, and without repeat values, and - // that all elements point to objects that exists within the - // transaction. If there are repeats, it means a transaction is trying - // to sign the same object twice. This is unncecessary, and opens up a - // DoS vector where the transaction asks the verifier to verify many GB - // of data. - for _, fieldMax := range fieldMaxs { - if !sortedUnique(fieldMax.field, fieldMax.max) { - return ErrSortedUniqueViolation - } - } - } - - return nil -} - -// SigHash returns the hash of the fields in a transaction covered by a given -// signature. -func SigHash(t types.Transaction, i int, height uint64) (hash types.Hash256) { - sig := t.Signatures[i] - if sig.CoveredFields.WholeTransaction { - return WholeSigHash(t, sig, height) - } - return PartialSigHash(t, sig.CoveredFields, height) -} - -// replayPrefix returns the replay protection prefix for the specified height. -// These prefixes are included in a transaction's SigHash; a new prefix is used -// after each hardfork to prevent replay attacks. -func replayPrefix(height uint64) []byte { - switch { - case height >= FoundationHardforkHeight: - return FoundationHardforkReplayProtectionPrefix - case height >= ASICHardforkHeight: - return ASICHardforkReplayProtectionPrefix - default: - return nil - } -} - -// WholeSigHash calculates the hash for a signature that specifies -// WholeTransaction = true. 
-func WholeSigHash(t types.Transaction, sig types.TransactionSignature, height uint64) (hash types.Hash256) { - h := types.NewHasher() - - h.E.WritePrefix(len((t.SiacoinInputs))) - for i := range t.SiacoinInputs { - h.E.Write(replayPrefix(height)) - t.SiacoinInputs[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.SiacoinOutputs))) - for i := range t.SiacoinOutputs { - t.SiacoinOutputs[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.FileContracts))) - for i := range t.FileContracts { - t.FileContracts[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.FileContractRevisions))) - for i := range t.FileContractRevisions { - t.FileContractRevisions[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.StorageProofs))) - for i := range t.StorageProofs { - t.StorageProofs[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.SiafundInputs))) - for i := range t.SiafundInputs { - h.E.Write(replayPrefix(height)) - t.SiafundInputs[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.SiafundOutputs))) - for i := range t.SiafundOutputs { - t.SiafundOutputs[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.MinerFees))) - for i := range t.MinerFees { - t.MinerFees[i].EncodeTo(h.E) - } - h.E.WritePrefix(len((t.ArbitraryData))) - for i := range t.ArbitraryData { - h.E.WriteBytes(t.ArbitraryData[i]) - } - - h.E.Write(sig.ParentID[:]) - h.E.WriteUint64(sig.PublicKeyIndex) - h.E.WriteUint64(uint64(sig.Timelock)) - - for _, i := range sig.CoveredFields.Signatures { - t.Signatures[i].EncodeTo(h.E) - } - - return h.Sum() -} - -// PartialSigHash calculates the hash of the fields of the transaction -// specified in cf. 
-func PartialSigHash(t types.Transaction, cf types.CoveredFields, height uint64) (hash types.Hash256) { - h := types.NewHasher() - - for _, input := range cf.SiacoinInputs { - h.E.Write(replayPrefix(height)) - t.SiacoinInputs[input].EncodeTo(h.E) - } - for _, output := range cf.SiacoinOutputs { - t.SiacoinOutputs[output].EncodeTo(h.E) - } - for _, contract := range cf.FileContracts { - t.FileContracts[contract].EncodeTo(h.E) - } - for _, revision := range cf.FileContractRevisions { - t.FileContractRevisions[revision].EncodeTo(h.E) - } - for _, storageProof := range cf.StorageProofs { - t.StorageProofs[storageProof].EncodeTo(h.E) - } - for _, siafundInput := range cf.SiafundInputs { - h.E.Write(replayPrefix(height)) - t.SiafundInputs[siafundInput].EncodeTo(h.E) - } - for _, siafundOutput := range cf.SiafundOutputs { - t.SiafundOutputs[siafundOutput].EncodeTo(h.E) - } - for _, minerFee := range cf.MinerFees { - t.MinerFees[minerFee].EncodeTo(h.E) - } - for _, arbData := range cf.ArbitraryData { - h.E.WriteBytes(t.ArbitraryData[arbData]) - } - for _, sig := range cf.Signatures { - t.Signatures[sig].EncodeTo(h.E) - } - - return h.Sum() -} - -// Each input has a list of public keys and a required number of signatures. -// inputSignatures keeps track of which public keys have been used and how many -// more signatures are needed. -type inputSignatures struct { - remainingSignatures uint64 - possibleKeys []types.UnlockKey - usedKeys map[uint64]struct{} - index int -} - -// validSignatures checks the validaty of all signatures in a transaction. -func validSignatures(t types.Transaction, currentHeight uint64) error { - // Check that all covered fields objects follow the rules. - err := validCoveredFields(t) - if err != nil { - return err - } - - // Create the inputSignatures object for each input. 
- sigMap := make(map[types.Hash256]*inputSignatures) - for i, input := range t.SiacoinInputs { - id := types.Hash256(input.ParentID) - _, exists := sigMap[id] - if exists { - return ErrDoubleSpend - } - - sigMap[id] = &inputSignatures{ - remainingSignatures: input.UnlockConditions.SignaturesRequired, - possibleKeys: input.UnlockConditions.PublicKeys, - usedKeys: make(map[uint64]struct{}), - index: i, - } - } - for i, revision := range t.FileContractRevisions { - id := types.Hash256(revision.ParentID) - _, exists := sigMap[id] - if exists { - return ErrDoubleSpend - } - - sigMap[id] = &inputSignatures{ - remainingSignatures: revision.UnlockConditions.SignaturesRequired, - possibleKeys: revision.UnlockConditions.PublicKeys, - usedKeys: make(map[uint64]struct{}), - index: i, - } - } - for i, input := range t.SiafundInputs { - id := types.Hash256(input.ParentID) - _, exists := sigMap[id] - if exists { - return ErrDoubleSpend - } - - sigMap[id] = &inputSignatures{ - remainingSignatures: input.UnlockConditions.SignaturesRequired, - possibleKeys: input.UnlockConditions.PublicKeys, - usedKeys: make(map[uint64]struct{}), - index: i, - } - } - - // Check all of the signatures for validity. - for i, sig := range t.Signatures { - // Check that sig corresponds to an entry in sigMap. - inSig, exists := sigMap[types.Hash256(sig.ParentID)] - if !exists || inSig.remainingSignatures == 0 { - return ErrFrivolousSignature - } - // Check that sig's key hasn't already been used. - _, exists = inSig.usedKeys[sig.PublicKeyIndex] - if exists { - return ErrPublicKeyOveruse - } - // Check that the public key index refers to an existing public key. - if sig.PublicKeyIndex >= uint64(len(inSig.possibleKeys)) { - return ErrInvalidPubKeyIndex - } - // Check that the timelock has expired. - if sig.Timelock > currentHeight { - return ErrPrematureSignature - } - - // Check that the signature verifies. Multiple signature schemes are - // supported. 
- publicKey := inSig.possibleKeys[sig.PublicKeyIndex] - switch publicKey.Algorithm { - case SignatureEntropy: - // Entropy cannot ever be used to sign a transaction. - return ErrEntropyKey - - case SignatureEd25519: - // Decode the public key and signature. - var edPK types.PublicKey - copy(edPK[:], publicKey.Key) - var edSig types.Signature - copy(edSig[:], sig.Signature) - - sigHash := SigHash(t, i, currentHeight) - ok := edPK.VerifyHash(sigHash, edSig) - if !ok { - return ErrInvalidSignature - } - - default: - // If the identifier is not recognized, assume that the signature - // is valid. This allows more signature types to be added via soft - // forking. - } - - inSig.usedKeys[sig.PublicKeyIndex] = struct{}{} - inSig.remainingSignatures-- - } - - // Check that all inputs have been sufficiently signed. - for _, reqSigs := range sigMap { - if reqSigs.remainingSignatures != 0 { - return ErrMissingSignatures - } - } - - return nil -} - -// StandaloneValid returns an error if a transaction is not valid in any -// context, for example if the same output is spent twice in the same -// transaction. StandaloneValid will not check that all outputs being spent are -// legal outputs, as it has no confirmed or unconfirmed set to look at. 
-func StandaloneValid(t types.Transaction, currentHeight uint64) (err error) { - err = fitsInABlock(t, currentHeight) - if err != nil { - return - } - err = followsStorageProofRules(t) - if err != nil { - return - } - err = noRepeats(t) - if err != nil { - return - } - err = followsMinimumValues(t) - if err != nil { - return - } - err = correctFileContracts(t, currentHeight) - if err != nil { - return - } - err = correctFileContractRevisions(t, currentHeight) - if err != nil { - return - } - err = correctArbitraryData(t, currentHeight) - if err != nil { - return - } - err = validUnlockConditions(t, currentHeight) - if err != nil { - return - } - err = validSignatures(t, currentHeight) - if err != nil { - return - } - return -} - -// MinimumTransactionSet takes two transaction sets as input and returns a -// combined transaction set. The first input is the set of required -// transactions, which the caller is indicating must all be a part of the final -// set.The second input is a set of related transactions that the caller -// believes may contain parent transactions of the required transactions. -// MinimumCombinedSet will scan through the related transactions and pull in any -// which are required parents of the required transactions, returning the final -// result. -// -// The final transaction set which gets returned will contain all of the -// required transactions, and will contain any of the related transactions which -// are necessary for the required transactions to be confirmed. -// -// NOTE: Both of the inputs are proper transaction sets. A proper transaction -// set is already sorted so that no parent comes after a child in the array. -func MinimumTransactionSet(requiredTxns []types.Transaction, relatedTxns []types.Transaction) []types.Transaction { - // objectID is used internally to identify which transactions create outputs - // for each other. 
- type objectID [32]byte - - // Track which transactions have already been scanned and added to the final - // set of required transactions. - includedTxns := make(map[types.TransactionID]struct{}) - - // Determine what the required inputs are for the provided transaction. - requiredInputs := make(map[objectID]struct{}) - for _, txn := range requiredTxns { - for _, sci := range txn.SiacoinInputs { - oid := objectID(sci.ParentID) - requiredInputs[oid] = struct{}{} - } - for _, fcr := range txn.FileContractRevisions { - oid := objectID(fcr.ParentID) - requiredInputs[oid] = struct{}{} - } - for _, sp := range txn.StorageProofs { - oid := objectID(sp.ParentID) - requiredInputs[oid] = struct{}{} - } - for _, sfi := range txn.SiafundInputs { - oid := objectID(sfi.ParentID) - requiredInputs[oid] = struct{}{} - } - includedTxns[txn.ID()] = struct{}{} - } - - // Create a list of which related transactions create which outputs. - potentialSources := make(map[objectID]*types.Transaction) - for i := 0; i < len(relatedTxns); i++ { - for j := range relatedTxns[i].SiacoinOutputs { - potentialSources[objectID(relatedTxns[i].SiacoinOutputID(j))] = &relatedTxns[i] - } - for j := range relatedTxns[i].FileContracts { - potentialSources[objectID(relatedTxns[i].FileContractID(j))] = &relatedTxns[i] - } - for j := range relatedTxns[i].SiafundOutputs { - potentialSources[objectID(relatedTxns[i].SiafundOutputID(j))] = &relatedTxns[i] - } - } - - // Cycle through all of the required inputs and find the transactions that - // contain required inputs to the provided transaction. Do so in a loop that - // will keep checking for more required inputs - visitedInputs := make(map[objectID]struct{}) - var requiredParents []types.Transaction - for len(requiredInputs) > 0 { - newRequiredInputs := make(map[objectID]struct{}) - for ri := range requiredInputs { - // First check whether we've scanned this input for required parents - // before. If so, there is no need to scan again. 
This clause will - // guarantee eventual termination. - _, exists := visitedInputs[ri] - if exists { - continue - } - visitedInputs[ri] = struct{}{} - - // Check if this input is available at all in the potential sources. - // If not, that means this input may already be confirmed on the - // blockchain. - txn, exists := potentialSources[ri] - if !exists { - continue - } - - // Check if this transaction has already been scanned and added as a - // requirement. - _, exists = includedTxns[txn.ID()] - if exists { - continue - } - - // If the input does have a source in the list of related - // transactions, the source also needs to have its inputs checked - // for any requirements. - requiredParents = append(requiredParents, *txn) - for _, sci := range txn.SiacoinInputs { - oid := objectID(sci.ParentID) - newRequiredInputs[oid] = struct{}{} - } - for _, fcr := range txn.FileContractRevisions { - oid := objectID(fcr.ParentID) - newRequiredInputs[oid] = struct{}{} - } - for _, sp := range txn.StorageProofs { - oid := objectID(sp.ParentID) - newRequiredInputs[oid] = struct{}{} - } - for _, sfi := range txn.SiafundInputs { - oid := objectID(sfi.ParentID) - newRequiredInputs[oid] = struct{}{} - } - } - - // All previously required inputs have been visited, but new required - // inputs may have been picked up. Now need to scan those new required - // inputs. - requiredInputs = newRequiredInputs - } - - // Build the final set. The requiredTxns are already sorted to be in the - // correct order (per the input requirements) but the required parents were - // constructed in reverse order, and therefore need to be reversed as they - // are appended. - var minSet []types.Transaction - for i := len(requiredParents) - 1; i >= 0; i-- { - minSet = append(minSet, requiredParents[i]) - } - minSet = append(minSet, requiredTxns...) - return minSet -} - -// CopyTransaction creates a deep copy of the transaction. 
-func CopyTransaction(txn types.Transaction) types.Transaction { - var newTxn types.Transaction - var buf bytes.Buffer - e := types.NewEncoder(&buf) - txn.EncodeTo(e) - e.Flush() - d := types.NewDecoder(io.LimitedReader{R: &buf, N: int64(buf.Len())}) - newTxn.DecodeFrom(d) - return newTxn -} - -// ExplicitCoveredFields returns a CoveredFields that covers all elements -// present in txn. -func ExplicitCoveredFields(txn types.Transaction) (cf types.CoveredFields) { - for i := range txn.SiacoinInputs { - cf.SiacoinInputs = append(cf.SiacoinInputs, uint64(i)) - } - for i := range txn.SiacoinOutputs { - cf.SiacoinOutputs = append(cf.SiacoinOutputs, uint64(i)) - } - for i := range txn.FileContracts { - cf.FileContracts = append(cf.FileContracts, uint64(i)) - } - for i := range txn.FileContractRevisions { - cf.FileContractRevisions = append(cf.FileContractRevisions, uint64(i)) - } - for i := range txn.StorageProofs { - cf.StorageProofs = append(cf.StorageProofs, uint64(i)) - } - for i := range txn.SiafundInputs { - cf.SiafundInputs = append(cf.SiafundInputs, uint64(i)) - } - for i := range txn.SiafundOutputs { - cf.SiafundOutputs = append(cf.SiafundOutputs, uint64(i)) - } - for i := range txn.MinerFees { - cf.MinerFees = append(cf.MinerFees, uint64(i)) - } - for i := range txn.ArbitraryData { - cf.ArbitraryData = append(cf.ArbitraryData, uint64(i)) - } - for i := range txn.Signatures { - cf.Signatures = append(cf.Signatures, uint64(i)) - } - return -} - -// FullCoveredFields returns a CoveredFields that covers the whole transaction. 
-func FullCoveredFields() types.CoveredFields { - return types.CoveredFields{WholeTransaction: true,} -} diff --git a/modules/wallet.go b/modules/wallet.go index 55b0868..959ffc2 100644 --- a/modules/wallet.go +++ b/modules/wallet.go @@ -4,456 +4,95 @@ import ( "errors" "time" + "go.sia.tech/core/consensus" "go.sia.tech/core/types" ) -const ( - // PublicKeysPerSeed define the number of public keys that get pregenerated - // for a seed at startup when searching for balances in the blockchain. - PublicKeysPerSeed = 2500 -) - var ( - // ErrBadEncryptionKey is returned if the incorrect encryption key to a - // file is provided. - ErrBadEncryptionKey = errors.New("provided encryption key is incorrect") - - // ErrIncompleteTransactions is returned if the wallet has incomplete - // transactions being built that are using all of the current outputs, and - // therefore the wallet is unable to spend money despite it not technically - // being 'unconfirmed' yet. - ErrIncompleteTransactions = errors.New("wallet has coins spent in incomplete transactions - not enough remaining coins") - - // ErrLockedWallet is returned when an action cannot be performed due to - // the wallet being locked. - ErrLockedWallet = errors.New("wallet must be unlocked before it can be used") - - // ErrLowBalance is returned if the wallet does not have enough funds to - // complete the desired action. - ErrLowBalance = errors.New("insufficient balance") - - // ErrWalletShutdown is returned when a method can't continue execution due - // to the wallet shutting down. - ErrWalletShutdown = errors.New("wallet is shutting down") + // ErrInsufficientBalance is returned when there aren't enough unused outputs + // to cover the requested amount. + ErrInsufficientBalance = errors.New("insufficient balance") ) -type ( - // Seed is cryptographic entropy that is used to derive spendable wallet - // addresses. - Seed [16]byte - - // WalletKey is the key used to encrypt the wallet. 
- WalletKey []byte - - // WalletTransactionID is a unique identifier for a wallet transaction. - WalletTransactionID types.Hash256 - - // A ProcessedInput represents funding to a transaction. The input is - // coming from an address and going to the outputs. The fund types are - // 'SiacoinInput', 'SiafundInput'. - ProcessedInput struct { - ParentID types.Hash256 `json:"parentid"` - FundType types.Specifier `json:"fundtype"` - WalletAddress bool `json:"walletaddress"` - RelatedAddress types.Address `json:"relatedaddress"` - Value types.Currency `json:"value"` - } - - // A ProcessedOutput is a Siacoin output that appears in a transaction. - // Some outputs mature immediately, some are delayed, and some may never - // mature at all (in the event of storage proofs). - // - // Fund type can either be 'SiacoinOutput', 'SiafundOutput', 'ClaimOutput', - // 'MinerPayout', or 'MinerFee'. All outputs except the miner fee create - // outputs accessible to an address. Miner fees are not spendable, and - // instead contribute to the block subsidy. - // - // MaturityHeight indicates at what block height the output becomes - // available. SiacoinInputs and SiafundInputs become available immediately. - // ClaimInputs and MinerPayouts become available after 144 confirmations. - ProcessedOutput struct { - ID types.Hash256 `json:"id"` - FundType types.Specifier `json:"fundtype"` - MaturityHeight uint64 `json:"maturityheight"` - WalletAddress bool `json:"walletaddress"` - RelatedAddress types.Address `json:"relatedaddress"` - Value types.Currency `json:"value"` - } - - // A ProcessedTransaction is a transaction that has been processed into - // explicit inputs and outputs and tagged with some header data such as - // confirmation height + timestamp. - // - // Because of the block subsidy, a block is considered as a transaction. - // Since there is technically no transaction id for the block subsidy, the - // block id is used instead. 
- ProcessedTransaction struct { - Transaction types.Transaction `json:"transaction"` - TransactionID types.TransactionID `json:"transactionid"` - ConfirmationHeight uint64 `json:"confirmationheight"` - ConfirmationTimestamp time.Time `json:"confirmationtimestamp"` - - Inputs []ProcessedInput `json:"inputs"` - Outputs []ProcessedOutput `json:"outputs"` - } - - // ValuedTransaction is a transaction that has been given incoming and - // outgoing siacoin value fields. - ValuedTransaction struct { - ProcessedTransaction - - ConfirmedIncomingValue types.Currency `json:"confirmedincomingvalue"` - ConfirmedOutgoingValue types.Currency `json:"confirmedoutgoingvalue"` - } - - // A UnspentOutput is a SiacoinOutput or SiafundOutput that the wallet - // is tracking. - UnspentOutput struct { - ID types.Hash256 `json:"id"` - FundType types.Specifier `json:"fundtype"` - UnlockHash types.Address `json:"unlockhash"` - Value types.Currency `json:"value"` - ConfirmationHeight uint64 `json:"confirmationheight"` - IsWatchOnly bool `json:"iswatchonly"` - } - - // TransactionBuilder is used to construct transactions. - TransactionBuilder interface { - // FundTransaction adds siacoin inputs worth at least the requested - // amount to the provided transaction. A change output is also added, - // if necessary. The inputs will not be available to future calls to - // FundTransaction unless ReleaseInputs is called. - FundTransaction(txn *types.Transaction, amount types.Currency) (types.Transaction, []types.Hash256, error) - - // MarkWalletInputs scans a transaction and infers which inputs belong - // to this wallet. This allows those inputs to be signed. - MarkWalletInputs(txn types.Transaction) ([]types.Hash256) - - // ReleaseInputs is a helper function that releases the inputs of txn - // for use in other transactions. It should only be called on - // transactions that are invalid or will never be broadcast. 
- ReleaseInputs(txnSet []types.Transaction) - - // Sign will sign any inputs added by FundTransaction. - Sign(txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error - } - - // EncryptionManager can encrypt, lock, unlock, and indicate the current - // status of the EncryptionManager. - EncryptionManager interface { - // Encrypt will encrypt the wallet using the input key. Upon - // encryption, a primary seed will be created for the wallet (no seed - // exists prior to this point). If the key is blank, then the hash of - // the seed that is generated will be used as the key. - // - // Encrypt can only be called once throughout the life of the wallet - // and will return an error on subsequent calls (even after restarting - // the wallet). To reset the wallet, the wallet must be deleted. - Encrypt(masterKey WalletKey) (Seed, error) - - // Reset will reset the wallet, clearing the database and returning it to - // the unencrypted state. Reset can only be called on a wallet that has - // already been encrypted. - Reset() error - - // Encrypted returns whether or not the wallet has been encrypted yet. - // After being encrypted for the first time, the wallet can only be - // unlocked using the encryption password. - Encrypted() (bool, error) - - // InitFromSeed functions like Encrypt, but using a specified seed. - // Unlike Encrypt, the blockchain will be scanned to determine the - // seed's progress. For this reason, InitFromSeed should not be called - // until the blockchain is fully synced. - InitFromSeed(masterKey WalletKey, seed Seed) error - - // Lock deletes all keys in memory and prevents the wallet from being - // used to spend coins or extract keys until 'Unlock' is called. - Lock() error - - // Unlock must be called before the wallet is usable. All wallets and - // wallet seeds are encrypted by default, and the wallet will not know - // which addresses to watch for on the blockchain until unlock has been - // called. 
- // - // All items in the wallet are encrypted using different keys which are - // derived from the master key. - Unlock(masterKey WalletKey) error - - // UnlockAsync must be called before the wallet is usable. All wallets and - // wallet seeds are encrypted by default, and the wallet will not know - // which addresses to watch for on the blockchain until unlock has been - // called. - // UnlockAsync will return a channel as soon as the wallet is unlocked but - // before the wallet is caught up to consensus. - // - // All items in the wallet are encrypted using different keys which are - // derived from the master key. - UnlockAsync(masterKey WalletKey) <-chan error - - // ChangeKey changes the wallet's materKey from masterKey to newKey, - // re-encrypting the wallet with the provided key. - ChangeKey(masterKey WalletKey, newKey WalletKey) error - - // IsMasterKey verifies that the masterKey is the key used to encrypt - // the wallet. - IsMasterKey(masterKey WalletKey) (bool, error) - - // ChangeKeyWithSeed is the same as ChangeKey but uses the primary seed - // instead of the current masterKey. - ChangeKeyWithSeed(seed Seed, newKey WalletKey) error +// Wallet stores and manages Siacoins. +type Wallet interface { + // AddressBalance returns the balance of the given address. + AddressBalance(addr types.Address) (siacoins types.Currency, siafunds uint64) - // Unlocked returns true if the wallet is currently unlocked, false - // otherwise. - Unlocked() (bool, error) - } + // Addresses returns the addresses of the wallet. + Addresses() (addrs []types.Address) - // KeyManager manages wallet keys, including the use of seeds, creating and - // loading backups, and providing a layer of compatibility for older wallet - // files. - KeyManager interface { - // AllAddresses returns all addresses that the wallet is able to spend - // from, including unseeded addresses. Addresses are returned sorted in - // byte-order. 
- AllAddresses() ([]types.Address, error) + // AddWatch adds the given watched address to the wallet. + AddWatch(addr types.Address) error - // AllSeeds returns all of the seeds that are being tracked by the - // wallet, including the primary seed. Only the primary seed is used to - // generate new addresses, but the wallet can spend funds sent to - // public keys generated by any of the seeds returned. - AllSeeds() ([]Seed, error) + // Annotate annotates a transaction set. + Annotate(txns []types.Transaction) (ptxns []PoolTransaction) - // LastAddresses returns the last n addresses starting at the last seedProgress - // for which an address was generated. - LastAddresses(n uint64) ([]types.Address, error) + // Close shuts down the wallet. + Close() error - // LoadSeed will recreate a wallet file using the recovery phrase. - // LoadSeed only needs to be called if the original seed file or - // encryption password was lost. The master key is used to encrypt the - // recovery seed before saving it to disk. - LoadSeed(WalletKey, Seed) error + // ConfirmedBalance returns the total balance of the wallet. + ConfirmedBalance() (siacoins, immatureSiacoins types.Currency, siafunds uint64) - // MarkAddressUnused marks the provided address as unused which causes it to be - // handed out by a subsequent call to `NextAddresses` again. - MarkAddressUnused(...types.UnlockConditions) error + // Fund adds Siacoin inputs with the required amount to the transaction. + Fund(txn *types.Transaction, amount types.Currency) (parents []types.Transaction, toSign []types.Hash256, err error) - // NextAddress returns a new coin address generated from the - // primary seed. - NextAddress() (types.UnlockConditions, error) + // MarkAddressUnused marks the provided address as unused which causes it to be + // handed out by a subsequent call to `NextAddresses` again. 
+ MarkAddressUnused(addrs ...types.UnlockConditions) error - // NextAddresses returns n new coin addresses generated from the - // primary seed. - NextAddresses(uint64) ([]types.UnlockConditions, error) + // MarkWalletInputs scans a transaction and infers which inputs belong to this + // wallet. This allows those inputs to be signed. + MarkWalletInputs(txn types.Transaction) (toSign []types.Hash256) - // PrimarySeed returns the unencrypted primary seed of the wallet, - // along with a uint64 indicating how many addresses may be safely - // generated from the seed. - PrimarySeed() (Seed, uint64, error) + // NextAddress returns an unlock hash that is ready to receive Siacoins or + // Siafunds. + NextAddress() (types.UnlockConditions, error) - // SignTransaction adds a signature to each of the specified inputs. - SignTransaction(txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error + // Release marks the outputs as unused. + Release(txnSet []types.Transaction) - // SweepSeed scans the blockchain for outputs generated from seed and - // creates a transaction that transfers them to the wallet. Note that - // this incurs a transaction fee. It returns the total value of the - // outputs, minus the fee. If only Siafunds were found, the fee is - // deducted from the wallet. - SweepSeed(seed Seed) (coins types.Currency, funds uint64, err error) - } + // RemoveWatch removes the given watched address from the wallet. + RemoveWatch(addr types.Address) error - // SiacoinSenderMulti is the minimal interface for an object that can send - // money to multiple siacoin outputs at once. - SiacoinSenderMulti interface { - // SendSiacoinsMulti sends coins to multiple addresses. - SendSiacoinsMulti(outputs []types.SiacoinOutput) ([]types.Transaction, error) - } + // Reserve reserves the given ids for the given duration. + Reserve(ids []types.Hash256, duration time.Duration) error - // Wallet stores and manages Siacoins and Siafunds. 
The wallet file is - // encrypted using a user-specified password. Common addresses are all - // derived from a single address seed. - Wallet interface { - Alerter - EncryptionManager - KeyManager - TransactionBuilder + // RenterSeed derives a renter seed. + RenterSeed(email string) []byte - // AddUnlockConditions adds a set of UnlockConditions to the wallet database. - AddUnlockConditions(uc types.UnlockConditions) error + // SendSiacoins creates a transaction sending 'amount' to 'dest'. The + // transaction is submitted to the transaction pool and is also returned. Fees + // are added to the amount sent. + SendSiacoins(amount types.Currency, dest types.Address) ([]types.Transaction, error) - // AddWatchAddresses instructs the wallet to begin tracking a set of - // addresses, in addition to the addresses it was previously tracking. - // If none of the addresses have appeared in the blockchain, the - // unused flag may be set to true. Otherwise, the wallet must rescan - // the blockchain to search for transactions containing the addresses. - AddWatchAddresses(addrs []types.Address, unused bool) error + // Sign signs the specified transaction using keys derived from the wallet seed. + Sign(cs consensus.State, txn *types.Transaction, toSign []types.Hash256) error - // Close permits clean shutdown during testing and serving. - Close() error + // Tip returns the wallet's internal processed chain index. + Tip() types.ChainIndex - // ConfirmedBalance returns the confirmed balance of the wallet, minus - // any outgoing transactions. ConfirmedBalance will include unconfirmed - // refund transactions. - ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance uint64, siacoinClaimBalance types.Currency, err error) + // UnconfirmedBalance returns the balance of the wallet contained in + // the unconfirmed transactions. + UnconfirmedBalance() (outgoing, incoming types.Currency) - // UnconfirmedBalance returns the unconfirmed balance of the wallet. 
- // Outgoing funds and incoming funds are reported separately. Refund - // outputs are included, meaning that sending a single coin to - // someone could result in 'outgoing: 12, incoming: 11'. Siafunds are - // not considered in the unconfirmed balance. - UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency, err error) + // UnspentSiacoinOutputs returns the unspent SC outputs of the wallet. + UnspentSiacoinOutputs() (sces []types.SiacoinElement) - // Height returns the wallet's internal processed consensus height. - Height() (uint64, error) - - // AddressTransactions returns all of the transactions that are related - // to a given address. - AddressTransactions(types.Address) ([]ProcessedTransaction, error) - - // AddressUnconfirmedHistory returns all of the unconfirmed - // transactions related to a given address. - AddressUnconfirmedTransactions(types.Address) ([]ProcessedTransaction, error) - - // Transaction returns the transaction with the given id. The bool - // indicates whether the transaction is in the wallet database. The - // wallet only stores transactions that are related to the wallet. - Transaction(types.TransactionID) (ProcessedTransaction, bool, error) - - // Transactions returns all of the transactions that were confirmed at - // heights [startHeight, endHeight]. Unconfirmed transactions are not - // included. - Transactions(startHeight uint64, endHeight uint64) ([]ProcessedTransaction, error) - - // UnconfirmedTransactions returns all unconfirmed transactions - // relative to the wallet. - UnconfirmedTransactions() ([]ProcessedTransaction, error) - - // RemoveWatchAddresses instructs the wallet to stop tracking a set of - // addresses and delete their associated transactions. If none of the - // addresses have appeared in the blockchain, the unused flag may be - // set to true. Otherwise, the wallet must rescan the blockchain to - // rebuild its transaction history. 
- RemoveWatchAddresses(addrs []types.Address, unused bool) error - - // Rescanning reports whether the wallet is currently rescanning the - // blockchain. - Rescanning() (bool, error) - - // Settings returns the Wallet's current settings. - Settings() (WalletSettings, error) - - // SetSettings sets the Wallet's settings. - SetSettings(WalletSettings) error - - // SendSiacoins is a tool for sending Siacoins from the wallet to an - // address. Sending money usually results in multiple transactions. The - // transactions are automatically given to the transaction pool, and are - // also returned to the caller. - SendSiacoins(amount types.Currency, dest types.Address) ([]types.Transaction, error) - - // SendSiacoinsFeeIncluded sends Siacoins with fees included. - SendSiacoinsFeeIncluded(amount types.Currency, dest types.Address) ([]types.Transaction, error) - - SiacoinSenderMulti - - // DustThreshold returns the quantity per byte below which a Currency is - // considered to be Dust. - DustThreshold() (types.Currency, error) - - // UnspentOutputs returns the unspent outputs tracked by the wallet. - UnspentOutputs() ([]UnspentOutput, error) - - // UnlockConditions returns the UnlockConditions for the specified - // address, if they are known to the wallet. - UnlockConditions(addr types.Address) (types.UnlockConditions, error) - - // WatchAddresses returns the set of addresses that the wallet is - // currently watching. - WatchAddresses() ([]types.Address, error) - } - - // WalletSettings control the behavior of the Wallet. - WalletSettings struct { - NoDefrag bool `json:"nodefrag"` - } -) - -// EncodeTo implements types.EncoderTo interface. 
-func (pt * ProcessedTransaction) EncodeTo(e *types.Encoder) { - pt.Transaction.EncodeTo(e) - pt.TransactionID.EncodeTo(e) - e.WriteUint64(pt.ConfirmationHeight) - e.WriteUint64(uint64(pt.ConfirmationTimestamp.Unix())) - e.WritePrefix(len(pt.Inputs)) - for _, input := range pt.Inputs { - input.EncodeTo(e) - } - e.WritePrefix(len(pt.Outputs)) - for _, output := range pt.Outputs { - output.EncodeTo(e) - } -} - -// DecodeFrom implements types.DecoderFrom interface. -func (pt * ProcessedTransaction) DecodeFrom(d *types.Decoder) { - pt.Transaction.DecodeFrom(d) - pt.TransactionID.DecodeFrom(d) - pt.ConfirmationHeight = d.ReadUint64() - pt.ConfirmationTimestamp = time.Unix(int64(d.ReadUint64()), 0) - n := d.ReadPrefix() - pt.Inputs = make([]ProcessedInput, n) - for i := 0; i < n; i++ { - pt.Inputs[i].DecodeFrom(d) - } - n = d.ReadPrefix() - pt.Outputs = make([]ProcessedOutput, n) - for i := 0; i < n; i++ { - pt.Outputs[i].DecodeFrom(d) - } -} - -// EncodeTo implements types.EncoderTo interface. -func (pi *ProcessedInput) EncodeTo(e *types.Encoder) { - pi.ParentID.EncodeTo(e) - pi.FundType.EncodeTo(e) - e.WriteBool(pi.WalletAddress) - pi.RelatedAddress.EncodeTo(e) - pi.Value.EncodeTo(e) -} - -// DecodeFrom implements types.DecoderFrom interface. -func (pi *ProcessedInput) DecodeFrom(d *types.Decoder) { - pi.ParentID.DecodeFrom(d) - pi.FundType.DecodeFrom(d) - pi.WalletAddress = d.ReadBool() - pi.RelatedAddress.DecodeFrom(d) - pi.Value.DecodeFrom(d) -} - -// EncodeTo implements types.EncoderTo interface. -func (po *ProcessedOutput) EncodeTo(e *types.Encoder) { - po.ID.EncodeTo(e) - po.FundType.EncodeTo(e) - e.WriteUint64(po.MaturityHeight) - e.WriteBool(po.WalletAddress) - po.RelatedAddress.EncodeTo(e) - po.Value.EncodeTo(e) -} + // UnspentSiafundOutputs returns the unspent SF outputs of the wallet. + UnspentSiafundOutputs() (sfes []types.SiafundElement) -// DecodeFrom implements types.DecoderFrom interface. 
-func (po *ProcessedOutput) DecodeFrom(d *types.Decoder) { - po.ID.DecodeFrom(d) - po.FundType.DecodeFrom(d) - po.MaturityHeight = d.ReadUint64() - po.WalletAddress = d.ReadBool() - po.RelatedAddress.DecodeFrom(d) - po.Value.DecodeFrom(d) + // WatchedAddresses returns a list of the addresses watched by the wallet. + WatchedAddresses() (addrs []types.Address) } -// CalculateWalletTransactionID is a helper function for determining the id of -// a wallet transaction. -func CalculateWalletTransactionID(tid types.TransactionID, oid types.Hash256) WalletTransactionID { - h := types.NewHasher() - tid.EncodeTo(h.E) - oid.EncodeTo(h.E) - return WalletTransactionID(h.Sum()) +// A PoolTransaction summarizes the wallet-relevant data in a txpool +// transaction. +type PoolTransaction struct { + ID types.TransactionID `json:"id"` + Raw types.Transaction `json:"raw"` + Type string `json:"type"` + Sent types.Currency `json:"sent"` + Received types.Currency `json:"received"` + Locked types.Currency `json:"locked"` } diff --git a/modules/wallet/alert.go b/modules/wallet/alert.go deleted file mode 100644 index 844a009..0000000 --- a/modules/wallet/alert.go +++ /dev/null @@ -1,10 +0,0 @@ -package wallet - -import ( - "github.com/mike76-dev/sia-satellite/modules" -) - -// Alerts implements the Alerter interface for the wallet. -func (w *Wallet) Alerts() (crit, err, warn, info []modules.Alert) { - return -} diff --git a/modules/wallet/consts.go b/modules/wallet/consts.go deleted file mode 100644 index 29bddec..0000000 --- a/modules/wallet/consts.go +++ /dev/null @@ -1,43 +0,0 @@ -package wallet - -import ( - "go.sia.tech/core/types" -) - -const ( - // defragBatchSize defines how many outputs are combined during one defrag. - defragBatchSize = 35 - - // defragStartIndex is the number of outputs to skip over when performing a - // defrag. - defragStartIndex = 10 - - // defragThreshold is the number of outputs a wallet is allowed before it is - // defragmented. 
- defragThreshold = 50 -) - -const ( - // lookaheadBuffer together with lookaheadRescanThreshold defines the constant part - // of the maxLookahead. - lookaheadBuffer = uint64(4000) - - // lookaheadRescanThreshold is the number of keys in the lookahead that will be - // generated before a complete wallet rescan is initialized. - lookaheadRescanThreshold = uint64(1000) -) - -var ( - // Specifiers. - specifierMinerPayout = types.NewSpecifier("miner payout") - specifierMinerFee = types.NewSpecifier("miner fee") - specifierSiacoinInput = types.NewSpecifier("siacoin input") - specifierSiafundInput = types.NewSpecifier("siafund input") - specifierClaimOutput = types.NewSpecifier("claim output") -) - -// maxLookahead returns the size of the lookahead for a given seed progress -// which usually is the current primarySeedProgress. -func maxLookahead(start uint64) uint64 { - return start + lookaheadRescanThreshold + lookaheadBuffer + start / 10 -} diff --git a/modules/wallet/database.go b/modules/wallet/database.go index 85d7435..5bf1bb8 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -1,707 +1,595 @@ package wallet import ( - "bytes" "database/sql" "errors" - "fmt" - "time" "github.com/mike76-dev/sia-satellite/modules" - "go.sia.tech/core/types" - - "lukechampine.com/frand" ) -// threadedDBUpdate commits the active database transaction and starts a new -// transaction. -func (w *Wallet) threadedDBUpdate() { - if err := w.tg.Add(); err != nil { - return +// insertAddress inserts a new wallet address. 
+func (w *Wallet) insertAddress(addr types.Address) error { + res, err := w.tx.Exec("INSERT INTO wt_addresses (addr) VALUES (?)", addr[:]) + if err != nil { + w.dbError = true + return modules.AddContext(err, "couldn't insert address") } - defer w.tg.Done() - for { - select { - case <-time.After(2 * time.Minute): - case <-w.tg.StopChan(): - return - } - w.mu.Lock() - err := w.syncDB() - w.mu.Unlock() - if err != nil { - w.log.Severe("ERROR: syncDB encountered an error:", err) - panic("wallet syncing error") - } + index, err := res.LastInsertId() + if err != nil { + return err } + w.addrs[addr] = uint64(index) + + return nil } -// syncDB commits the current global transaction and immediately begins a -// new one. It must be called with a write-lock. -func (w *Wallet) syncDB() error { - // Check if we are not syncing already. - if w.syncing { - return nil - } - w.syncing = true - defer func() { - w.syncing = false - }() +// AddWatch adds the given watched address. +func (w *Wallet) AddWatch(addr types.Address) error { + w.mu.Lock() + defer w.mu.Unlock() - // If the rollback flag is set, it means that somewhere in the middle of an - // atomic update there was a failure, and that failure needs to be rolled - // back. An error will be returned. - if w.dbRollback { - err := errors.New("database unable to sync - rollback requested") - return modules.ComposeErrors(err, w.dbTx.Rollback()) + index, ok := w.addrs[addr] + if !ok { + return errors.New("address not found") } - // Commit the current tx. - err := w.dbTx.Commit() - if err != nil { - w.log.Severe("ERROR: failed to apply database update:", err) - err = modules.ComposeErrors(err, w.dbTx.Rollback()) - return modules.AddContext(err, "unable to commit dbTx in syncDB") - } - // Begin a new tx. 
- w.dbTx, err = w.db.Begin() + w.watchedAddrs[addr] = index + _, err := w.tx.Exec("INSERT INTO wt_watched (address_id) VALUES (?)", index) if err != nil { - w.log.Severe("ERROR: failed to start database update:", err) - return modules.AddContext(err, "unable to begin new dbTx in syncDB") + w.dbError = true + return modules.AddContext(err, "couldn't insert watched address") } + return nil } -// dbReset wipes and reinitializes a wallet database. -func dbReset(tx *sql.Tx) error { - _, err := tx.Exec("DELETE FROM wt_addr") - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM wt_txn") - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM wt_sco") - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM wt_sfo") - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM wt_spo") - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM wt_uc") - if err != nil { - return err - } - _, err = tx.Exec(` - REPLACE INTO wt_info (id, cc, height, encrypted, sfpool, salt, progress, seed, pwd) - VALUES (1, ?, ?, ?, ?, ?, ?, ?, ?) - `, modules.ConsensusChangeBeginning[:], 0, []byte{}, []byte{}, frand.Bytes(len(walletSalt{})), 0, []byte{}, []byte{}) - if err != nil { - return err - } +// RemoveWatch removes the given watched address. +func (w *Wallet) RemoveWatch(addr types.Address) error { + w.mu.Lock() + defer w.mu.Unlock() - _, err = tx.Exec("DELETE FROM wt_aux") + delete(w.watchedAddrs, addr) + _, err := w.tx.Exec(` + DELETE FROM wt_watched + WHERE address_id IN ( + SELECT id FROM wt_addresses + WHERE addr = ? + ) + `, addr[:]) if err != nil { - return err + w.dbError = true + return modules.AddContext(err, "couldn't delete address") } - _, err = tx.Exec("DELETE FROM wt_keys") - if err != nil { - return err - } - - _, err = tx.Exec("DELETE FROM wt_watch") - return err -} - -// dbPutSiacoinOutput inserts a Siacoin output into the database. 
-func dbPutSiacoinOutput(tx *sql.Tx, id types.SiacoinOutputID, output types.SiacoinOutput) error { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - output.EncodeTo(e) - e.Flush() - _, err := tx.Exec(` - INSERT INTO wt_sco (scoid, bytes) - VALUES (?, ?) AS new - ON DUPLICATE KEY UPDATE bytes = new.bytes - `, id[:], buf.Bytes()) - return err + return nil } -// dbDeleteSiacoinOutput removes a Siacoin output from the database. -func dbDeleteSiacoinOutput(tx *sql.Tx, id types.SiacoinOutputID) error { - _, err := tx.Exec("DELETE FROM wt_sco WHERE scoid = ?", id[:]) - return err -} +// WatchedAddresses returns a list of the addresses watched by the wallet. +func (w *Wallet) WatchedAddresses() (addrs []types.Address) { + w.mu.Lock() + defer w.mu.Unlock() -// dbForEachSiacoinOutput performs an action on each Siacoin output. -func dbForEachSiacoinOutput(tx *sql.Tx, fn func(types.SiacoinOutputID, types.SiacoinOutput)) error { - rows, err := tx.Query("SELECT scoid, bytes FROM wt_sco") - if err != nil { - return err + for addr := range w.watchedAddrs { + addrs = append(addrs, addr) } - outputs := make(map[types.SiacoinOutputID]types.SiacoinOutput) - for rows.Next() { - var scoid types.SiacoinOutputID - var sco types.SiacoinOutput - id := make([]byte, 32) - var scoBytes []byte - if err := rows.Scan(&id, &scoBytes); err != nil { - rows.Close() - return err - } - copy(scoid[:], id) - d := types.NewBufDecoder(scoBytes) - sco.DecodeFrom(d) - if err := d.Err(); err != nil { - rows.Close() - return err - } - outputs[scoid] = sco - } - rows.Close() + return addrs +} - for scoid, sco := range outputs { - fn(scoid, sco) +// updateTip updates the current tip of the wallet. +func (w *Wallet) updateTip(ci types.ChainIndex) error { + w.tip = ci + _, err := w.tx.Exec(` + REPLACE INTO wt_tip (id, height, bid) + VALUES (1, ?, ?) 
+ `, ci.Height, ci.ID[:]) + if err != nil { + w.dbError = true + return modules.AddContext(err, "couldn't update tip") } return nil } -// dbPutSiafundOutput inserts a Siafund output into the database. -func dbPutSiafundOutput(tx *sql.Tx, id types.SiafundOutputID, output types.SiafundOutput) error { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - output.EncodeTo(e) - e.Flush() - _, err := tx.Exec(` - INSERT INTO wt_sfo (sfoid, bytes) - VALUES (?, ?) AS new - ON DUPLICATE KEY UPDATE bytes = new.bytes - `, id[:], buf.Bytes()) - return err -} +// insertSiacoinElement inserts the given Siacoin element. +func (w *Wallet) insertSiacoinElement(sce types.SiacoinElement) error { + sce.MerkleProof = append([]types.Hash256(nil), sce.MerkleProof...) + w.sces[sce.SiacoinOutput.Address] = sce + _, err := w.tx.Exec(` + INSERT INTO wt_sces ( + scoid, + sc_value, + merkle_proof, + leaf_index, + maturity_height, + address_id + ) + VALUES (?, ?, ?, ?, ?, ( + SELECT id FROM wt_addresses + WHERE addr = ? + )) + `, + sce.ID[:], + encodeCurrency(sce.SiacoinOutput.Value), + encodeProof(sce.MerkleProof), + sce.LeafIndex, + sce.MaturityHeight, + sce.SiacoinOutput.Address[:], + ) + if err != nil { + w.dbError = true + } -// dbDeleteSiafundOutput removes a Siafund output from the database. -func dbDeleteSiafundOutput(tx *sql.Tx, id types.SiafundOutputID) error { - _, err := tx.Exec("DELETE FROM wt_sfo WHERE sfoid = ?", id[:]) return err } -// dbForEachSiafundOutput performs an action on each Siafund output. -func dbForEachSiafundOutput(tx *sql.Tx, fn func(types.SiafundOutputID, types.SiafundOutput, types.Currency)) error { - rows, err := tx.Query("SELECT sfoid, bytes FROM wt_sfo") +// deleteSiacoinElement deletes the Siacoin element with the given ID. +func (w *Wallet) deleteSiacoinElement(addr types.Address) error { + delete(w.sces, addr) + _, err := w.tx.Exec(` + DELETE FROM wt_sces + WHERE address_id IN ( + SELECT id FROM wt_addresses + WHERE addr = ? 
+ ) + `, addr[:]) if err != nil { - return err - } - - type extendedSiafundOutput struct { - output types.SiafundOutput - claimStart types.Currency - } - - outputs := make(map[types.SiafundOutputID]extendedSiafundOutput) - for rows.Next() { - var sfoid types.SiafundOutputID - var sfo types.SiafundOutput - var claimStart types.Currency - id := make([]byte, 32) - var sfoBytes []byte - if err := rows.Scan(&id, &sfoBytes); err != nil { - rows.Close() - return err - } - copy(sfoid[:], id) - d := types.NewBufDecoder(sfoBytes) - var val types.Currency - val.DecodeFrom(d) - sfo.Value = val.Lo - sfo.Address.DecodeFrom(d) - claimStart.DecodeFrom(d) - if err := d.Err(); err != nil { - rows.Close() - return err - } - outputs[sfoid] = extendedSiafundOutput{ - output: sfo, - claimStart: claimStart, - } + w.dbError = true } - rows.Close() - for sfoid, esfo := range outputs { - fn(sfoid, esfo.output, esfo.claimStart) - } - - return nil -} - -// dbPutSpentOutput inserts a new spent output into the database. -func dbPutSpentOutput(tx *sql.Tx, id types.Hash256, height uint64) error { - _, err := tx.Exec(` - INSERT INTO wt_spo (oid, height) - VALUES (?, ?) AS new - ON DUPLICATE KEY UPDATE height = new.height - `, id[:], height) return err } -// dbGetSpentOutput retrieves a spent output from the database. -func dbGetSpentOutput(tx *sql.Tx, id types.Hash256) (height uint64, err error) { - err = tx.QueryRow("SELECT height FROM wt_spo WHERE oid = ?", id[:]).Scan(&height) - return -} +// insertSiafundElement inserts the given Siafund element. +func (w *Wallet) insertSiafundElement(sfe types.SiafundElement) error { + sfe.MerkleProof = append([]types.Hash256(nil), sfe.MerkleProof...) + w.sfes[sfe.SiafundOutput.Address] = sfe + _, err := w.tx.Exec(` + INSERT INTO wt_sfes ( + sfoid, + claim_start, + merkle_proof, + leaf_index, + sf_value, + address_id + ) + VALUES (?, ?, ?, ?, ?, ( + SELECT id FROM wt_addresses + WHERE addr = ? 
+ )) + `, + sfe.ID[:], + encodeCurrency(sfe.ClaimStart), + encodeProof(sfe.MerkleProof), + sfe.LeafIndex, + sfe.SiafundOutput.Value, + sfe.SiafundOutput.Address[:], + ) + if err != nil { + w.dbError = true + } -// dbDeleteSpentOutput removes a spent output from the database. -func dbDeleteSpentOutput(tx *sql.Tx, id types.Hash256) error { - _, err := tx.Exec("DELETE FROM wt_spo WHERE oid = ?", id[:]) return err } -// dbGetAddrTransactions retrieves an address-txn mapping from the database. -func dbGetAddrTransactions(tx *sql.Tx, addr types.Address) (txns []types.TransactionID, err error) { - rows, err := tx.Query("SELECT txid FROM wt_addr WHERE addr = ?", addr[:]) +// deleteSiafundElement deletes the Siafund element with the given ID. +func (w *Wallet) deleteSiafundElement(addr types.Address) error { + delete(w.sfes, addr) + _, err := w.tx.Exec(` + DELETE FROM wt_sfes + WHERE address_id IN ( + SELECT id FROM wt_addresses + WHERE addr = ? + ) + `, addr[:]) if err != nil { - return nil, err + w.dbError = true } - defer rows.Close() - for rows.Next() { - var txid types.TransactionID - id := make([]byte, 32) - if err := rows.Scan(&id); err != nil { - return nil, err - } - copy(txid[:], id) - txns = append(txns, txid) - } - return txns, nil -} -// dbPutUnlockConditions adds new UnlockConditions to the database. -func dbPutUnlockConditions(tx *sql.Tx, uc types.UnlockConditions) error { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - uc.EncodeTo(e) - e.Flush() - uh := uc.UnlockHash() - _, err := tx.Exec(` - INSERT INTO wt_uc (addr, bytes) - VALUES (?, ?) AS new - ON DUPLICATE KEY UPDATE bytes = new.bytes - `, uh[:], buf.Bytes()) return err } -// dbGetUnlockConditions retrieves UnlockConditions from the database. 
-func dbGetUnlockConditions(tx *sql.Tx, addr types.Address) (uc types.UnlockConditions, err error) { - var ucBytes []byte - err = tx.QueryRow("SELECT bytes FROM wt_uc WHERE addr = ?", addr[:]).Scan(&ucBytes) +// updateSiacoinElementProofs updates the Merkle proofs on each SC element. +func (w *Wallet) updateSiacoinElementProofs(cu chainUpdate) error { + updateStmt, err := w.tx.Prepare(` + UPDATE wt_sces + SET merkle_proof = ?, leaf_index = ? + WHERE scoid = ? + `) if err != nil { - return + return modules.AddContext(err, "failed to prepare update statement") } - d := types.NewBufDecoder(ucBytes) - uc.DecodeFrom(d) - return uc, d.Err() -} + defer updateStmt.Close() -// dbAddAddrTransaction appends a single transaction ID to the set of -// transactions associated with addr. If the ID is already in the set, it is -// not added again. -func dbAddAddrTransaction(tx *sql.Tx, addr types.Address, txid types.TransactionID) error { - var c int - err := tx.QueryRow("SELECT COUNT(*) FROM wt_addr WHERE addr = ? AND txid = ?", addr[:], txid[:]).Scan(&c) - if err != nil { - return err - } - if c > 0 { - return nil - } - _, err = tx.Exec("INSERT INTO wt_addr (addr, txid) VALUES (?, ?)", addr[:], txid[:]) - return err -} - -// dbAddProcessedTransactionAddrs updates the address-txn mappings to associate -// every address in pt with txid. -func dbAddProcessedTransactionAddrs(tx *sql.Tx, pt modules.ProcessedTransaction) error { - addrs := make(map[types.Address]struct{}) - for _, input := range pt.Inputs { - addrs[input.RelatedAddress] = struct{}{} - } - for _, output := range pt.Outputs { - // Miner fees don't have an address, so skip them. 
- if output.FundType == specifierMinerFee { - continue - } - addrs[output.RelatedAddress] = struct{}{} - } - for addr := range addrs { - if err := dbAddAddrTransaction(tx, addr, pt.TransactionID); err != nil { - return modules.AddContext(err, fmt.Sprintf("failed to add txn %v to address %v", - pt.TransactionID, addr)) + for _, sce := range w.sces { + cu.UpdateElementProof(&sce.StateElement) + w.sces[sce.SiacoinOutput.Address] = sce + _, err := updateStmt.Exec(encodeProof(sce.MerkleProof), sce.LeafIndex, sce.ID[:]) + if err != nil { + w.dbError = true + return modules.AddContext(err, "failed to update Siacoin element") } } - return nil -} -// decodeProcessedTransaction decodes a marshalled ProcessedTransaction. -func decodeProcessedTransaction(ptBytes []byte, pt *modules.ProcessedTransaction) error { - d := types.NewBufDecoder(ptBytes) - pt.DecodeFrom(d) - return d.Err() + return nil } -// dbAppendProcessedTransaction adds a new ProcessedTransaction. -func dbAppendProcessedTransaction(tx *sql.Tx, pt modules.ProcessedTransaction) error { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - pt.EncodeTo(e) - e.Flush() - _, err := tx.Exec(` - INSERT INTO wt_txn (txid, bytes) VALUES (?, ?) AS new - ON DUPLICATE KEY UPDATE bytes = new.bytes - `, pt.TransactionID[:], buf.Bytes()) +// updateSiafundElementProofs updates the Merkle proofs on each SF element. +func (w *Wallet) updateSiafundElementProofs(cu chainUpdate) error { + updateStmt, err := w.tx.Prepare(` + UPDATE wt_sfes + SET merkle_proof = ?, leaf_index = ? + WHERE sfoid = ? + `) if err != nil { - return modules.AddContext(err, "failed to store processed txn in database") + return modules.AddContext(err, "failed to prepare update statement") } + defer updateStmt.Close() - // Also add this txid to wt_addr. 
- if err = dbAddProcessedTransactionAddrs(tx, pt); err != nil { - return modules.AddContext(err, "failed to add processed transaction to addresses in database") + for _, sfe := range w.sfes { + cu.UpdateElementProof(&sfe.StateElement) + w.sfes[sfe.SiafundOutput.Address] = sfe + _, err := updateStmt.Exec(encodeProof(sfe.MerkleProof), sfe.LeafIndex, sfe.ID[:]) + if err != nil { + w.dbError = true + return modules.AddContext(err, "failed to update Siafund element") + } } return nil } -// dbGetLastProcessedTransaction retrieves the last ProcessedTransaction. -func dbGetLastProcessedTransaction(tx *sql.Tx) (pt modules.ProcessedTransaction, err error) { - var ptBytes []byte - err = tx.QueryRow("SELECT bytes FROM wt_txn ORDER BY id DESC LIMIT 1").Scan(&ptBytes) - if err != nil { - return +// load loads the wallet data from the database. +func (w *Wallet) load() (err error) { + s := make([]byte, 32) + var progress uint64 + if err := w.db.QueryRow(` + SELECT seed, progress + FROM wt_info + WHERE id = 1 + `).Scan(&s, &progress); err != nil && !errors.Is(err, sql.ErrNoRows) { + return modules.AddContext(err, "couldn't load seed") } - err = decodeProcessedTransaction(ptBytes, &pt) - return -} + copy(w.seed[:], s) + for _, key := range generateKeys(w.seed, 0, progress) { + w.keys[types.StandardUnlockHash(key.PublicKey())] = key + } + w.regenerateLookahead(progress) -// dbDeleteLastProcessedTransaction deletes the last ProcessedTransaction. -func dbDeleteLastProcessedTransaction(tx *sql.Tx) error { - // Get the last processed txn. 
- var txid types.TransactionID - id := make([]byte, 32) - err := tx.QueryRow("SELECT txid FROM wt_txn ORDER BY id DESC LIMIT 1").Scan(&id) - if err != nil { - return err + b := make([]byte, 32) + if err := w.db.QueryRow(` + SELECT height, bid + FROM wt_tip + WHERE id = 1 + `).Scan(&w.tip.Height, &b); err != nil && !errors.Is(err, sql.ErrNoRows) { + return modules.AddContext(err, "couldn't load last chain index") } + copy(w.tip.ID[:], b) - // Delete the associated mappings. - copy(txid[:], id) - _, err = tx.Exec("DELETE FROM wt_addr WHERE txid = ?", txid[:]) + rows, err := w.db.Query(` + SELECT id, addr + FROM wt_addresses + `) if err != nil { - return err + return modules.AddContext(err, "couldn't query addresses") } - // Delete the last processed txn. - _, err = tx.Exec("DELETE FROM wt_txn WHERE txid = ?", txid[:]) - return err -} - -// dbGetProcessedTransaction retrieves a txn from the database. -func dbGetProcessedTransaction(tx *sql.Tx, txid types.TransactionID) (pt modules.ProcessedTransaction, err error) { - var ptBytes []byte - err = tx.QueryRow("SELECT bytes FROM wt_txn WHERE txid = ?", txid[:]).Scan(&ptBytes) - if err != nil { - return + for rows.Next() { + var index uint64 + var addr types.Address + if err := rows.Scan(&index, &b); err != nil { + return modules.AddContext(err, "couldn't scan address") + } + copy(addr[:], b) + w.addrs[addr] = index } - err = decodeProcessedTransaction(ptBytes, &pt) - return -} -// dbGetWalletSalt returns the salt used by the wallet to derive encryption keys. -func dbGetWalletSalt(tx *sql.Tx) (uid walletSalt) { - salt := make([]byte, 32) - err := tx.QueryRow("SELECT salt FROM wt_info WHERE id = 1").Scan(&salt) + rows.Close() + + rows, err = w.db.Query(` + SELECT id, addr + FROM wt_addresses + WHERE id IN ( + SELECT address_id + FROM wt_watched + ) + `) if err != nil { - return + return modules.AddContext(err, "couldn't query watched addresses") } - copy(uid[:], salt) - return -} - -// dbPutWalletSalt saves the salt to disk. 
-func dbPutWalletSalt(tx *sql.Tx, uid walletSalt) error { - _, err := tx.Exec("UPDATE wt_info SET salt = ? WHERE id = 1", uid[:]) - return err -} -// dbGetPrimarySeedProgress returns the number of keys generated from the -// primary seed. -func dbGetPrimarySeedProgress(tx *sql.Tx) (progress uint64, err error) { - err = tx.QueryRow("SELECT progress FROM wt_info WHERE id = 1").Scan(&progress) - return -} + for rows.Next() { + var index uint64 + var addr types.Address + if err := rows.Scan(&index, &b); err != nil { + return modules.AddContext(err, "couldn't scan watched address") + } + copy(addr[:], b) + w.watchedAddrs[addr] = index + } -// dbPutPrimarySeedProgress sets the primary seed progress counter. -func dbPutPrimarySeedProgress(tx *sql.Tx, progress uint64) error { - _, err := tx.Exec("UPDATE wt_info SET progress = ? WHERE id = 1", progress) - return err -} + rows.Close() -// dbGetConsensusChangeID returns the ID of the last ConsensusChange processed by the wallet. -func dbGetConsensusChangeID(tx *sql.Tx) (cc modules.ConsensusChangeID) { - ccBytes := make([]byte, 32) - err := tx.QueryRow("SELECT cc FROM wt_info WHERE id = 1").Scan(&ccBytes) + rows, err = w.db.Query(` + SELECT + wt_sces.scoid, + wt_sces.sc_value, + wt_sces.merkle_proof, + wt_sces.leaf_index, + wt_sces.maturity_height, + wt_addresses.addr + FROM wt_sces + INNER JOIN wt_addresses + ON wt_sces.address_id = wt_addresses.id + `) if err != nil { - return modules.ConsensusChangeID{} + return modules.AddContext(err, "couldn't query SC elements") } - copy(cc[:], ccBytes) - return -} - -// dbPutConsensusChangeID stores the ID of the last ConsensusChange processed by the wallet. -func dbPutConsensusChangeID(tx *sql.Tx, cc modules.ConsensusChangeID) error { - _, err := tx.Exec("UPDATE wt_info SET cc = ? WHERE id = 1", cc[:]) - return err -} -// dbGetConsensusHeight returns the height that the wallet has scanned to. 
-func dbGetConsensusHeight(tx *sql.Tx) (height uint64, err error) { - err = tx.QueryRow("SELECT height FROM wt_info WHERE id = 1").Scan(&height) - return -} + for rows.Next() { + id := make([]byte, 32) + addr := make([]byte, 32) + var v, proof []byte + var li, mh uint64 + if err = rows.Scan(&id, &v, &proof, &li, &mh, &addr); err != nil { + return modules.AddContext(err, "couldn't scan SC element") + } + sce := types.SiacoinElement{ + StateElement: types.StateElement{ + MerkleProof: decodeProof(proof), + LeafIndex: li, + }, + MaturityHeight: mh, + SiacoinOutput: types.SiacoinOutput{ + Value: decodeCurrency(v), + }, + } + copy(sce.ID[:], id) + copy(sce.SiacoinOutput.Address[:], addr) + w.sces[sce.SiacoinOutput.Address] = sce + } -// dbPutConsensusHeight stores the height that the wallet has scanned to. -func dbPutConsensusHeight(tx *sql.Tx, height uint64) error { - _, err := tx.Exec("UPDATE wt_info SET height = ? WHERE id = 1", height) - return err -} + rows.Close() -// dbGetSiafundPool returns the value of the Siafund pool. 
-func dbGetSiafundPool(tx *sql.Tx) (pool types.Currency, err error) { - var poolBytes []byte - err = tx.QueryRow("SELECT sfpool FROM wt_info WHERE id = 1").Scan(&poolBytes) + rows, err = w.db.Query(` + SELECT + wt_sfes.sfoid, + wt_sfes.claim_start, + wt_sfes.merkle_proof, + wt_sfes.leaf_index, + wt_sfes.sf_value, + wt_addresses.addr + FROM wt_sfes + INNER JOIN wt_addresses + ON wt_sfes.address_id = wt_addresses.id + `) if err != nil { - return types.ZeroCurrency, err + return modules.AddContext(err, "couldn't query SF elements") } - if len(poolBytes) == 0 { - return types.ZeroCurrency, nil + + for rows.Next() { + id := make([]byte, 32) + addr := make([]byte, 32) + var cs, proof []byte + var value, li uint64 + if err = rows.Scan(&id, &cs, &proof, &li, &value, &addr); err != nil { + return modules.AddContext(err, "couldn't scan SF element") + } + sfe := types.SiafundElement{ + StateElement: types.StateElement{ + MerkleProof: decodeProof(proof), + LeafIndex: li, + }, + SiafundOutput: types.SiafundOutput{Value: value}, + ClaimStart: decodeCurrency(cs), + } + copy(sfe.ID[:], id) + copy(sfe.SiafundOutput.Address[:], addr) + w.sfes[sfe.SiafundOutput.Address] = sfe } - d := types.NewBufDecoder(poolBytes) - pool.DecodeFrom(d) - return pool, d.Err() -} -// dbPutSiafundPool stores the value of the Siafund pool. -func dbPutSiafundPool(tx *sql.Tx, pool types.Currency) error { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - pool.EncodeTo(e) - e.Flush() - _, err := tx.Exec("UPDATE wt_info SET sfpool = ? WHERE id = 1", buf.Bytes()) - return err -} + rows.Close() -// dbGetWatchedAddresses retrieves the set of watched addresses. 
-func dbGetWatchedAddresses(tx *sql.Tx) (addrs []types.Address, err error) { - rows, err := tx.Query("SELECT addr FROM wt_watch") + rows, err = w.db.Query("SELECT id FROM wt_spent") if err != nil { - return nil, err + return modules.AddContext(err, "couldn't query spent outputs") } - defer rows.Close() for rows.Next() { - var addr types.Address - a := make([]byte, 32) - if err := rows.Scan(&a); err != nil { - return nil, err + var id types.Hash256 + if err := rows.Scan(&b); err != nil { + return modules.AddContext(err, "couldn't scan spent output") } - copy(addr[:], a) - addrs = append(addrs, addr) + copy(id[:], b) + w.used[id] = true } - return + rows.Close() + + w.tx, err = w.db.Begin() + if err != nil { + return modules.AddContext(err, "couldn't start wallet transaction") + } + + return nil } -// dbPutWatchedAddresses stores the set of watched addresses. -func dbPutWatchedAddresses(tx *sql.Tx, addrs []types.Address) error { - for _, addr := range addrs { - _, err := tx.Exec("REPLACE INTO wt_watch (addr) VALUES (?)", addr[:]) +// save saves the wallet state. +// A lock must be acquired before calling this function. +func (w *Wallet) save() (err error) { + if w.dbError { + err = w.tx.Rollback() + if err != nil { + return err + } + w.dbError = false + } else { + err = w.tx.Commit() if err != nil { return err } } - return nil + w.tx, err = w.db.Begin() + return err } -// dbGetEncryptedVerification returns the encrypted ciphertext. -func dbGetEncryptedVerification(tx *sql.Tx) ([]byte, error) { - var encrypted []byte - err := tx.QueryRow("SELECT encrypted FROM wt_info WHERE id = 1").Scan(&encrypted) +// reset resets the database before rescanning. 
+func (w *Wallet) reset() (err error) { + w.mu.Lock() + defer w.mu.Unlock() + + _, err = w.tx.Exec("DROP TABLE wt_sces") if err != nil { - return nil, err + w.dbError = true + return modules.AddContext(err, "couldn't drop SC elements") } - empty := make([]byte, len(encrypted)) - if bytes.Equal(encrypted, empty) { - return nil, nil + _, err = w.tx.Exec("DROP TABLE wt_sfes") + if err != nil { + w.dbError = true + return modules.AddContext(err, "couldn't drop SF elements") } - return encrypted, nil -} - -// dbPutEncryptedVerification sets a new encrypted ciphertext. -func dbPutEncryptedVerification(tx *sql.Tx, encrypted []byte) error { - _, err := tx.Exec("UPDATE wt_info SET encrypted = ? WHERE id = 1", encrypted) - return err -} - -// dbGetPrimarySeed returns the wallet primary seed. -func dbGetPrimarySeed(tx *sql.Tx) (seed encryptedSeed, err error) { - u := make([]byte, 32) - var e, s []byte - err = tx.QueryRow("SELECT salt, encrypted, seed FROM wt_info WHERE id = 1").Scan(&u, &e, &s) + _, err = w.tx.Exec("DROP TABLE wt_watched") if err != nil { - return + w.dbError = true + return modules.AddContext(err, "couldn't drop watched addresses") } - copy(seed.UID[:], u) - seed.EncryptionVerification = make([]byte, len(e)) - copy(seed.EncryptionVerification, e) - seed.Seed = make([]byte, len(s)) - copy(seed.Seed, s) - return -} - -// dbPutPrimarySeed saves the wallet primary seed. -func dbPutPrimarySeed(tx *sql.Tx, seed encryptedSeed) error { - _, err := tx.Exec("UPDATE wt_info SET salt = ?, encrypted = ?, seed = ? WHERE id = 1", seed.UID[:], seed.EncryptionVerification, seed.Seed) - return err -} - -// dbGetWalletPassword returns the wallet password. -func dbGetWalletPassword(tx *sql.Tx) (pwd []byte, err error) { - err = tx.QueryRow("SELECT pwd FROM wt_info WHERE id = 1").Scan(&pwd) - return -} - -// dbPutWalletPassword saves the wallet password. -func dbPutWalletPassword(tx *sql.Tx, pwd []byte) error { - _, err := tx.Exec("UPDATE wt_info SET pwd = ? 
WHERE id = 1", pwd) - return err -} - -// dbGetAuxiliarySeeds retrieves the auxiliary seeds. -func dbGetAuxiliarySeeds(tx *sql.Tx) (seeds []encryptedSeed, err error) { - rows, err := tx.Query("SELECT salt, encrypted, seed FROM wt_aux") + _, err = w.tx.Exec("DROP TABLE wt_addresses") if err != nil { - return nil, err + w.dbError = true + return modules.AddContext(err, "couldn't drop addresses") + } + _, err = w.tx.Exec("DROP TABLE wt_spent") + if err != nil { + w.dbError = true + return modules.AddContext(err, "couldn't drop spent outputs") } - defer rows.Close() - for rows.Next() { - var seed encryptedSeed - u := make([]byte, 32) - var e, s []byte - if err := rows.Scan(&u, &e, &s); err != nil { - return nil, err - } - copy(seed.UID[:], u) - seed.EncryptionVerification = make([]byte, len(e)) - copy(seed.EncryptionVerification, e) - seed.Seed = make([]byte, len(s)) - copy(seed.Seed, s) - seeds = append(seeds, seed) + _, err = w.tx.Exec(` + CREATE TABLE wt_addresses ( + id BIGINT NOT NULL AUTO_INCREMENT, + addr BINARY(32) NOT NULL UNIQUE, + PRIMARY KEY (id) + ) + `) + if err != nil { + w.dbError = true + return modules.AddContext(err, "couldn't create addresses") } - return -} + _, err = w.tx.Exec(` + CREATE TABLE wt_sces ( + id BIGINT NOT NULL AUTO_INCREMENT, + scoid BINARY(32) NOT NULL UNIQUE, + sc_value BLOB NOT NULL, + merkle_proof BLOB NOT NULL, + leaf_index BIGINT UNSIGNED NOT NULL, + maturity_height BIGINT UNSIGNED NOT NULL, + address_id BIGINT NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY (address_id) REFERENCES wt_addresses(id) + ) + `) + if err != nil { + w.dbError = true + return modules.AddContext(err, "couldn't create SC elements") + } -// dbPutAuxiliarySeeds saves the auxiliary seeds. 
-func dbPutAuxiliarySeeds(tx *sql.Tx, seeds []encryptedSeed) error { - _, err := tx.Exec("DELETE FROM wt_aux") + _, err = w.tx.Exec(` + CREATE TABLE wt_sfes ( + id BIGINT NOT NULL AUTO_INCREMENT, + sfoid BINARY(32) NOT NULL UNIQUE, + claim_start BLOB NOT NULL, + merkle_proof BLOB NOT NULL, + leaf_index BIGINT UNSIGNED NOT NULL, + sf_value BIGINT UNSIGNED NOT NULL, + address_id BIGINT NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY (address_id) REFERENCES wt_addresses(id) + ) + `) if err != nil { - return err + w.dbError = true + return modules.AddContext(err, "couldn't create SF elements") } - for _, seed := range seeds { - _, err := tx.Exec("INSERT INTO wt_aux (salt, encrypted, seed) VALUES (?, ?, ?)", seed.UID[:], seed.EncryptionVerification, seed.Seed) - if err != nil { - return err - } + + _, err = w.tx.Exec(` + CREATE TABLE wt_watched ( + address_id BIGINT NOT NULL UNIQUE, + FOREIGN KEY (address_id) REFERENCES wt_addresses(id) + ) + `) + if err != nil { + w.dbError = true + return modules.AddContext(err, "couldn't create watched addresses") } - return nil -} -// dbGetUnseededKeys retrieves the spendable keys. 
-func dbGetUnseededKeys(tx *sql.Tx) (keys []encryptedSpendableKey, err error) { - rows, err := tx.Query("SELECT salt, encrypted, skey FROM wt_keys") + _, err = w.tx.Exec(` + CREATE TABLE wt_spent ( + id BINARY(32) NOT NULL, + PRIMARY KEY (id) + ) + `) if err != nil { - return nil, err + w.dbError = true + return modules.AddContext(err, "couldn't create spent outputs") } - defer rows.Close() - for rows.Next() { - var sk encryptedSpendableKey - s := make([]byte, 32) - var e, k []byte - if err := rows.Scan(&s, &e, &k); err != nil { - return nil, err - } - copy(sk.Salt[:], s) - sk.EncryptionVerification = make([]byte, len(e)) - copy(sk.EncryptionVerification, e) - sk.SpendableKey = make([]byte, len(k)) - copy(sk.SpendableKey, k) - keys = append(keys, sk) + if err := w.updateTip(w.tip); err != nil { + return err } - return + return w.save() } -// dbPutUnseededKeys saves the spendable keys. -func dbPutUnseededKeys(tx *sql.Tx, keys []encryptedSpendableKey) error { - _, err := tx.Exec("DELETE FROM wt_keys") +// saveSeed saves the new seed. +func (w *Wallet) saveSeed(progress uint64) error { + _, err := w.tx.Exec(` + REPLACE INTO wt_info (id, seed, progress) + VALUES (1, ?, ?) + `, w.seed[:], progress) if err != nil { - return err - } - for _, key := range keys { - _, err := tx.Exec("INSERT INTO wt_keys (salt, encrypted, skey) VALUES (?, ?, ?)", key.Salt[:], key.EncryptionVerification, key.SpendableKey) - if err != nil { - return err - } + w.dbError = true + return modules.AddContext(err, "couldn't save wallet seed") } - return nil + return w.save() } -// dbResetBeforeRescan deletes the wallet history before starting a new scan. -func dbResetBeforeRescan(tx *sql.Tx) error { - _, err := tx.Exec("DELETE FROM wt_txn") - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM wt_addr") - if err != nil { - return err - } - _, err = tx.Exec("DELETE FROM wt_sco") - if err != nil { - return err +// getSeedProgress returns the current seed progress. 
+func (w *Wallet) getSeedProgress() (progress uint64, err error) { + err = w.tx.QueryRow("SELECT progress FROM wt_info WHERE id = 1").Scan(&progress) + if errors.Is(err, sql.ErrNoRows) { + err = nil } - _, err = tx.Exec("DELETE FROM wt_sfo") + return +} + +// putSeedProgress updates the current seed progress. +func (w *Wallet) putSeedProgress(progress uint64) error { + _, err := w.tx.Exec("UPDATE wt_info SET progress = ? WHERE id = 1", progress) if err != nil { - return err + w.dbError = true } - _, err = tx.Exec("DELETE FROM wt_spo") + return err +} + +// insertSpentOutput adds a new spent output. +func (w *Wallet) insertSpentOutput(id types.Hash256) error { + w.used[id] = true + _, err := w.tx.Exec("INSERT INTO wt_spent (id) VALUES (?)", id[:]) if err != nil { - return err + w.dbError = true } - _, err = tx.Exec("DELETE FROM wt_uc") + return err +} + +// removeSpentOutput removes a spent output. +func (w *Wallet) removeSpentOutput(id types.Hash256) error { + delete(w.used, id) + _, err := w.tx.Exec("DELETE FROM wt_spent WHERE id = ?", id[:]) if err != nil { - return err + w.dbError = true } - return nil + return err } diff --git a/modules/wallet/defrag.go b/modules/wallet/defrag.go index e799188..01ddee9 100644 --- a/modules/wallet/defrag.go +++ b/modules/wallet/defrag.go @@ -5,47 +5,46 @@ import ( "sort" "github.com/mike76-dev/sia-satellite/modules" + "go.uber.org/zap" "go.sia.tech/core/types" ) var ( errDefragNotNeeded = errors.New("defragging not needed, wallet is already sufficiently defragged") + errDustOutput = errors.New("output is too small") + errSpentOutput = errors.New("output is already spent") +) + +const ( + defragStartIndex = 10 + defragBatchSize = 35 ) // managedCreateDefragTransaction creates a transaction that spends multiple existing // wallet outputs into a single new address. func (w *Wallet) managedCreateDefragTransaction() (_ []types.Transaction, err error) { - // dustThreshold and minFee have to be obtained separate from the lock. 
- dustThreshold, err := w.DustThreshold() - if err != nil { - return nil, err - } - minFee, _ := w.tpool.FeeEstimation() + dustThreshold := w.DustThreshold() + fee := w.cm.RecommendedFee() w.mu.Lock() defer w.mu.Unlock() - consensusHeight, err := dbGetConsensusHeight(w.dbTx) - if err != nil { - return nil, err - } - // Collect a value-sorted set of Siacoin outputs. var so sortedOutputs - err = dbForEachSiacoinOutput(w.dbTx, func(scoid types.SiacoinOutputID, sco types.SiacoinOutput) { - if w.checkOutput(w.dbTx, consensusHeight, scoid, sco, dustThreshold) == nil { - so.ids = append(so.ids, scoid) - so.outputs = append(so.outputs, sco) + for _, sce := range w.sces { + if w.checkOutput(sce, dustThreshold) == nil { + so.ids = append(so.ids, types.SiacoinOutputID(sce.ID)) + so.outputs = append(so.outputs, sce.SiacoinOutput) } - }) + } if err != nil { return nil, err } sort.Sort(sort.Reverse(so)) // Only defrag if there are enough outputs to merit defragging. - if len(so.ids) <= defragThreshold { + if len(so.ids) <= 50 { return nil, errDefragNotNeeded } @@ -59,7 +58,7 @@ func (w *Wallet) managedCreateDefragTransaction() (_ []types.Transaction, err er sco := so.outputs[i] // Add a Siacoin input for this output. - outputUnlockConditions := w.keys[sco.Address].UnlockConditions + outputUnlockConditions := types.StandardUnlockConditions(w.keys[sco.Address].PublicKey()) sci := types.SiacoinInput{ ParentID: scoid, UnlockConditions: outputUnlockConditions, @@ -73,7 +72,7 @@ func (w *Wallet) managedCreateDefragTransaction() (_ []types.Transaction, err er // Create and add the output that will be used to fund the defrag // transaction. 
- parentUnlockConditions, err := w.nextPrimarySeedAddress(w.dbTx) + parentUnlockConditions, err := w.nextAddress() if err != nil { return nil, err } @@ -89,12 +88,17 @@ func (w *Wallet) managedCreateDefragTransaction() (_ []types.Transaction, err er parentTxn.SiacoinOutputs = append(parentTxn.SiacoinOutputs, exactOutput) // Sign all of the inputs to the parent transaction. - for _, sci := range parentTxn.SiacoinInputs { - addSignatures(&parentTxn, modules.FullCoveredFields(), sci.UnlockConditions, types.Hash256(sci.ParentID), w.keys[sci.UnlockConditions.UnlockHash()], consensusHeight) + for i, sci := range parentTxn.SiacoinInputs { + parentTxn.Signatures = append(parentTxn.Signatures, types.TransactionSignature{ + ParentID: types.Hash256(sci.ParentID), + CoveredFields: types.CoveredFields{WholeTransaction: true}, + PublicKeyIndex: uint64(i), + }) + SignTransaction(w.cm.TipState(), &parentTxn, i, w.keys[sci.UnlockConditions.UnlockHash()]) } // Create the defrag transaction. - refundAddr, err := w.nextPrimarySeedAddress(w.dbTx) + refundAddr, err := w.nextAddress() if err != nil { return nil, err } @@ -106,7 +110,7 @@ func (w *Wallet) managedCreateDefragTransaction() (_ []types.Transaction, err er // Compute the transaction fee. 
sizeAvgOutput := uint64(250) - fee := minFee.Mul64(sizeAvgOutput * defragBatchSize) + fee = fee.Mul64(sizeAvgOutput * defragBatchSize) txn := types.Transaction{ SiacoinInputs: []types.SiacoinInput{{ @@ -119,17 +123,23 @@ func (w *Wallet) managedCreateDefragTransaction() (_ []types.Transaction, err er }}, MinerFees: []types.Currency{fee}, } - addSignatures(&txn, modules.FullCoveredFields(), parentUnlockConditions, types.Hash256(parentTxn.SiacoinOutputID(0)), w.keys[parentUnlockConditions.UnlockHash()], consensusHeight) + txn.Signatures = append(txn.Signatures, types.TransactionSignature{ + ParentID: types.Hash256(parentTxn.SiacoinOutputID(0)), + CoveredFields: types.CoveredFields{WholeTransaction: true}, + }) + SignTransaction(w.cm.TipState(), &txn, 0, w.keys[parentUnlockConditions.UnlockHash()]) // Mark all outputs that were spent as spent. for _, scoid := range spentScoids { - if err = dbPutSpentOutput(w.dbTx, types.Hash256(scoid), consensusHeight); err != nil { + if err := w.insertSpentOutput(types.Hash256(scoid)); err != nil { return nil, err } + } + // Mark the parent output as spent. Must be done after the transaction is // finished because otherwise the txid and output id will change. - if err = dbPutSpentOutput(w.dbTx, types.Hash256(parentTxn.SiacoinOutputID(0)), consensusHeight); err != nil { + if err := w.insertSpentOutput(types.Hash256(parentTxn.SiacoinOutputID(0))); err != nil { return nil, err } @@ -142,29 +152,12 @@ func (w *Wallet) managedCreateDefragTransaction() (_ []types.Transaction, err er // operation is only performed if the wallet has greater than defragThreshold // outputs. func (w *Wallet) threadedDefragWallet() { - // Don't defrag if it was disabled. - w.mu.RLock() - disabled := w.defragDisabled - w.mu.RUnlock() - if disabled { - return - } - err := w.tg.Add() if err != nil { return } defer w.tg.Done() - // Check if a defrag makes sense. 
- w.mu.RLock() - unlocked := w.unlocked - w.mu.RUnlock() - if !unlocked { - // Can't defrag if the wallet is locked. - return - } - // Create the defrag transaction. txnSet, err := w.managedCreateDefragTransaction() defer func() { @@ -175,26 +168,26 @@ func (w *Wallet) threadedDefragWallet() { defer w.mu.Unlock() for _, txn := range txnSet { for _, sci := range txn.SiacoinInputs { - dbDeleteSpentOutput(w.dbTx, types.Hash256(sci.ParentID)) + if err := w.removeSpentOutput(types.Hash256(sci.ParentID)); err != nil { + w.log.Error("couldn't remove spent output", zap.Error(err)) + } } } }() if modules.ContainsError(err, errDefragNotNeeded) { - // Begin. return } else if err != nil { - w.log.Println("WARN: couldn't create defrag transaction:", err) + w.log.Warn("couldn't create defrag transaction", zap.Error(err)) return } // Submit the defrag to the transaction pool. - err = w.tpool.AcceptTransactionSet(txnSet) + _, err = w.cm.AddPoolTransactions(txnSet) if err != nil { - w.log.Println("WARN: defrag transaction was rejected:", err) + w.Release(txnSet) + w.log.Error("invalid transaction set", zap.Error(err)) return } - w.log.Println("INFO: submitting a transaction set to defragment the wallet's outputs, IDs:") - for _, txn := range txnSet { - w.log.Println("Wallet defrag: \t", txn.ID()) - } + w.s.BroadcastTransactionSet(txnSet) + w.log.Info("submitting a transaction set to defragment the wallet's outputs") } diff --git a/modules/wallet/encoding.go b/modules/wallet/encoding.go new file mode 100644 index 0000000..a5d92cf --- /dev/null +++ b/modules/wallet/encoding.go @@ -0,0 +1,52 @@ +package wallet + +import ( + "bytes" + "encoding/binary" + + "go.sia.tech/core/types" +) + +func encodeCurrency(c types.Currency) []byte { + buf := make([]byte, 16) + binary.LittleEndian.PutUint64(buf, c.Lo) + binary.LittleEndian.PutUint64(buf[8:], c.Hi) + return buf +} + +func decodeCurrency(buf []byte) types.Currency { + if len(buf) != 16 { + panic("wrong currency length") + } + var c 
types.Currency + c.Lo = binary.LittleEndian.Uint64(buf) + c.Hi = binary.LittleEndian.Uint64(buf[8:]) + return c +} + +func encodeProof(proof []types.Hash256) []byte { + var buf bytes.Buffer + e := types.NewEncoder(&buf) + e.WritePrefix(len(proof)) + for _, hash := range proof { + hash.EncodeTo(e) + } + e.Flush() + return buf.Bytes() +} + +func decodeProof(buf []byte) []types.Hash256 { + d := types.NewBufDecoder(buf) + l := d.ReadPrefix() + if err := d.Err(); err != nil { + panic(err) + } + proof := make([]types.Hash256, l) + for i := range proof { + proof[i].DecodeFrom(d) + if err := d.Err(); err != nil { + panic(err) + } + } + return proof +} diff --git a/modules/wallet/encrypt.go b/modules/wallet/encrypt.go deleted file mode 100644 index 3237369..0000000 --- a/modules/wallet/encrypt.go +++ /dev/null @@ -1,815 +0,0 @@ -package wallet - -import ( - "bytes" - "database/sql" - "errors" - "fmt" - "time" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" - - "lukechampine.com/frand" -) - -var ( - errAlreadyUnlocked = errors.New("wallet has already been unlocked") - errReencrypt = errors.New("wallet is already encrypted, cannot encrypt again") - errScanInProgress = errors.New("another wallet rescan is already underway") - errUnencryptedWallet = errors.New("wallet has not been encrypted yet") - - // verificationPlaintext is the plaintext used to verify encryption keys. - // By storing the corresponding ciphertext for a given key, we can later - // verify that a key is correct by using it to decrypt the ciphertext and - // comparing the result to verificationPlaintext. - verificationPlaintext = []byte("Sia Satellite is built upon Sia!") -) - -// saltedEncryptionKey creates an encryption key that is used to decrypt a -// specific key. 
-func saltedEncryptionKey(masterKey modules.WalletKey, salt walletSalt) modules.WalletKey { - h := types.NewHasher() - h.E.Write(masterKey[:]) - h.E.Write(salt[:]) - entropy := h.Sum() - return modules.WalletKey(entropy[:]) -} - -// walletPasswordEncryptionKey creates an encryption key that is used to -// encrypt/decrypt the master encryption key. -func walletPasswordEncryptionKey(seed modules.Seed, salt walletSalt) modules.WalletKey { - h := types.NewHasher() - h.E.Write(seed[:]) - h.E.Write(salt[:]) - entropy := h.Sum() - return modules.WalletKey(entropy[:]) -} - -// verifyEncryption verifies that key properly decrypts the ciphertext to a -// preset plaintext. -func verifyEncryption(key modules.WalletKey, encrypted []byte) error { - verification, err := modules.Decrypt(key, encrypted) - if err != nil { - contextErr := modules.AddContext(modules.ErrBadEncryptionKey, "failed to decrypt key") - return modules.ComposeErrors(err, contextErr) - } - if !bytes.Equal(verificationPlaintext, verification) { - return modules.ErrBadEncryptionKey - } - return nil -} - -// checkMasterKey verifies that the masterKey is the key used to encrypt the -// wallet. -func checkMasterKey(tx *sql.Tx, masterKey modules.WalletKey) error { - if masterKey == nil { - return modules.ErrBadEncryptionKey - } - uk := saltedEncryptionKey(masterKey, dbGetWalletSalt(tx)) - encryptedVerification, err := dbGetEncryptedVerification(tx) - if err != nil { - return err - } - return verifyEncryption(uk, encryptedVerification) -} - -// initEncryption initializes and encrypts the primary seed. -func (w *Wallet) initEncryption(masterKey modules.WalletKey, seed modules.Seed, progress uint64) (modules.Seed, error) { - // Check if the wallet encryption key has already been set. - encryptedVerification, err := dbGetEncryptedVerification(w.dbTx) - if err != nil { - return modules.Seed{}, err - } - if encryptedVerification != nil { - return modules.Seed{}, errReencrypt - } - - // Save the primary seed. 
- s := createSeed(masterKey, seed) - err = dbPutPrimarySeed(w.dbTx, s) - if err != nil { - return modules.Seed{}, err - } - - // Record the progress. - err = dbPutPrimarySeedProgress(w.dbTx, progress) - if err != nil { - return modules.Seed{}, err - } - - // Establish the encryption verification using the masterKey. After this - // point, the wallet is encrypted. - uk := saltedEncryptionKey(masterKey, dbGetWalletSalt(w.dbTx)) - encryptedVerification, err = modules.Encrypt(uk, verificationPlaintext) - if err != nil { - return modules.Seed{}, modules.AddContext(err, "failed to encrypt verification") - } - err = dbPutEncryptedVerification(w.dbTx, encryptedVerification) - if err != nil { - return modules.Seed{}, err - } - - // Encrypt the masterkey using the seed to allow for a masterkey recovery using - // the seed. - wpk := walletPasswordEncryptionKey(seed, dbGetWalletSalt(w.dbTx)) - encrypted, err := modules.Encrypt(wpk, masterKey[:]) - if err != nil { - return modules.Seed{}, modules.AddContext(err, "failed to encrypt masterkey") - } - err = dbPutWalletPassword(w.dbTx, encrypted) - if err != nil { - return modules.Seed{}, err - } - - // On future startups, this field will be set by w.initPersist. - w.encrypted = true - - return seed, nil -} - -// managedMasterKey retrieves the masterkey that was stored encrypted in the -// wallet's database. -func (w *Wallet) managedMasterKey(seed modules.Seed) (modules.WalletKey, error) { - w.mu.Lock() - defer w.mu.Unlock() - - // Check if wallet is encrypted. - if !w.encrypted { - return nil, errUnencryptedWallet - } - - // Compute password from seed. - wpk := walletPasswordEncryptionKey(seed, dbGetWalletSalt(w.dbTx)) - - // Grab the encrypted masterkey. - encryptedMK, err := dbGetWalletPassword(w.dbTx) - if err != nil { - return nil, err - } - if len(encryptedMK) == 0 { - return nil, errors.New("wallet is encrypted but masterkey is missing") - } - - // Decrypt the masterkey. 
- masterKey, err := modules.Decrypt(wpk, encryptedMK) - if err != nil { - return nil, modules.AddContext(err, "failed to decrypt masterkey") - } - - return modules.WalletKey(masterKey), nil -} - -// managedUnlock loads all of the encrypted structures into wallet memory. Even -// after loading, the structures are kept encrypted, but some data such as -// addresses are decrypted so that the wallet knows what to track. -func (w *Wallet) managedUnlock(masterKey modules.WalletKey) <-chan error { - errChan := make(chan error, 1) - - // Blocking unlock. - lastChange, err := w.managedBlockingUnlock(masterKey) - if err != nil { - errChan <- err - return errChan - } - - // Non-blocking unlock. - go func() { - defer close(errChan) - if err := w.tg.Add(); err != nil { - errChan <- err - return - } - defer w.tg.Done() - err := w.managedAsyncUnlock(lastChange) - if err != nil { - errChan <- err - } - }() - return errChan -} - -// managedBlockingUnlock handles the blocking part of hte managedUnlock method. -func (w *Wallet) managedBlockingUnlock(masterKey modules.WalletKey) (modules.ConsensusChangeID, error) { - w.mu.RLock() - unlocked := w.unlocked - encrypted := w.encrypted - w.mu.RUnlock() - if unlocked { - return modules.ConsensusChangeID{}, errAlreadyUnlocked - } else if !encrypted { - return modules.ConsensusChangeID{}, errUnencryptedWallet - } - - // Load db objects into memory. - var lastChange modules.ConsensusChangeID - var encryptedPrimarySeed encryptedSeed - var primarySeedProgress uint64 - var auxiliarySeeds []encryptedSeed - var unseededKeys []encryptedSpendableKey - var watchedAddrs []types.Address - err := func() error { - w.mu.Lock() - defer w.mu.Unlock() - - // Verify masterKey. - err := checkMasterKey(w.dbTx, masterKey) - if err != nil { - return err - } - - // lastChange. - lastChange = dbGetConsensusChangeID(w.dbTx) - - // encryptedPrimarySeed + primarySeedProgress. 
- encryptedPrimarySeed, err = dbGetPrimarySeed(w.dbTx) - if err != nil { - return err - } - primarySeedProgress, err = dbGetPrimarySeedProgress(w.dbTx) - if err != nil { - return err - } - - // auxiliarySeeds. - auxiliarySeeds, err = dbGetAuxiliarySeeds(w.dbTx) - if err != nil { - return err - } - - // unseededKeys. - unseededKeys, err = dbGetUnseededKeys(w.dbTx) - if err != nil { - return err - } - - // watchedAddrs. - watchedAddrs, err = dbGetWatchedAddresses(w.dbTx) - if err != nil { - return err - } - - return nil - }() - if err != nil { - return modules.ConsensusChangeID{}, err - } - - // Decrypt + load keys. - err = func() error { - w.mu.Lock() - defer w.mu.Unlock() - - // primarySeed. - primarySeed, err := decryptSeed(masterKey, encryptedPrimarySeed) - if err != nil { - return err - } - w.integrateSeed(primarySeed, primarySeedProgress) - w.primarySeed = primarySeed - w.regenerateLookahead(primarySeedProgress) - - // auxiliarySeeds. - for _, as := range auxiliarySeeds { - auxSeed, err := decryptSeed(masterKey, as) - if err != nil { - return err - } - w.integrateSeed(auxSeed, modules.PublicKeysPerSeed) - w.seeds = append(w.seeds, auxSeed) - } - - // unseededKeys. - for _, uk := range unseededKeys { - sk, err := decryptSpendableKey(masterKey, uk) - if err != nil { - return err - } - w.integrateSpendableKey(masterKey, sk) - } - - // watchedAddrs. - for _, addr := range watchedAddrs { - w.watchedAddrs[addr] = struct{}{} - } - - // If the wallet password hasn't been encrypted yet using the seed, do it. 
- wpk, err := dbGetWalletPassword(w.dbTx) - if err != nil { - return err - } - if len(wpk) == 0 { - wpk = walletPasswordEncryptionKey(primarySeed, dbGetWalletSalt(w.dbTx)) - encrypted, err := modules.Encrypt(wpk, masterKey) - if err != nil { - return modules.AddContext(err, "failed to encrypt masterkey") - } - return dbPutWalletPassword(w.dbTx, encrypted) - } - return nil - }() - if err != nil { - return modules.ConsensusChangeID{}, err - } - - w.mu.Lock() - w.unlocked = true - w.mu.Unlock() - return lastChange, nil -} - -// managedAsyncUnlock handles the async part of hte managedUnlock method. -func (w *Wallet) managedAsyncUnlock(lastChange modules.ConsensusChangeID) error { - // Subscribe to the consensus set if this is the first unlock for the - // wallet object. - w.subscribedMu.Lock() - defer w.subscribedMu.Unlock() - if !w.subscribed { - // Subscription can take a while, so spawn a goroutine to print the - // wallet height every few seconds. (If subscription completes - // quickly, nothing will be printed.) - done := make(chan struct{}) - go w.rescanMessage(done) - defer close(done) - - err := w.cs.ConsensusSetSubscribe(w, lastChange, w.tg.StopChan()) - if modules.ContainsError(err, modules.ErrInvalidConsensusChangeID) { - // Something went wrong; resubscribe from the beginning. - err = dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning) - if err != nil { - return fmt.Errorf("failed to reset db during rescan: %v", err) - } - err = dbPutConsensusHeight(w.dbTx, 0) - if err != nil { - return fmt.Errorf("failed to reset db during rescan: %v", err) - } - // Delete the wallet history before resubscribing. Otherwise we - // will end up with duplicate entries in the database. 
- err = dbResetBeforeRescan(w.dbTx) - if err != nil { - return fmt.Errorf("failed to reset wallet history during rescan: %v", err) - } - err = w.syncDB() - if err != nil { - return fmt.Errorf("failed to sync database: %v", err) - } - err = w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning, w.tg.StopChan()) - } - if err != nil { - return fmt.Errorf("wallet subscription failed: %v", err) - } - w.tpool.TransactionPoolSubscribe(w) - } - w.subscribed = true - return nil -} - -// rescanMessage prints the blockheight every 3 seconds until done is closed. -func (w *Wallet) rescanMessage(done chan struct{}) { - // Sleep first because we may not need to print a message at all if - // done is closed quickly. - select { - case <-done: - return - case <-time.After(3 * time.Second): - } - - for { - w.mu.Lock() - height, _ := dbGetConsensusHeight(w.dbTx) - w.mu.Unlock() - fmt.Printf("\rWallet: scanned to height %d...", height) - - select { - case <-done: - fmt.Println("\nDone!") - return - case <-time.After(3 * time.Second): - } - } -} - -// wipeSecrets erases all of the seeds and secret keys in the wallet. -func (w *Wallet) wipeSecrets() { - // 'for i := range' must be used to prevent copies of secret data from - // being made. - for i := range w.keys { - for j := range w.keys[i].SecretKeys { - for k := range w.keys[i].SecretKeys[j] { - w.keys[i].SecretKeys[j][k] = 0 - } - } - } - for i := range w.seeds { - for j := range w.seeds[i] { - w.seeds[i][j] = 0 - } - } - for i := range w.primarySeed { - w.primarySeed[i] = 0 - } - w.seeds = w.seeds[:0] -} - -// Encrypted returns whether or not the wallet has been encrypted. -func (w *Wallet) Encrypted() (bool, error) { - if err := w.tg.Add(); err != nil { - return false, err - } - defer w.tg.Done() - w.mu.Lock() - defer w.mu.Unlock() - return w.encrypted, nil -} - -// Encrypt will create a primary seed for the wallet and encrypt it using -// masterKey. 
If masterKey is blank, then the hash of the primary seed will be -// used instead. The wallet will still be locked after Encrypt is called. -// -// Encrypt can only be called once throughout the life of the wallet, and will -// return an error on subsequent calls (even after restarting the wallet). To -// reset the wallet, the wallet must be deleted. -func (w *Wallet) Encrypt(masterKey modules.WalletKey) (modules.Seed, error) { - if err := w.tg.Add(); err != nil { - return modules.Seed{}, err - } - defer w.tg.Done() - w.mu.Lock() - defer w.mu.Unlock() - - // Create a random seed. - var seed modules.Seed - frand.Read(seed[:]) - - // If masterKey is blank, use the hash of the seed. - if masterKey == nil { - h := types.NewHasher() - h.E.Write(seed[:]) - hash := h.Sum() - key := make([]byte, len(hash)) - copy(key[:], hash[:]) - masterKey = modules.WalletKey(key) - frand.Read(hash[:]) - } - - // Initial seed progress is 0. - return w.initEncryption(masterKey, seed, 0) -} - -// Reset will reset the wallet, clearing the database and returning it to -// the unencrypted state. -func (w *Wallet) Reset() error { - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - - // Unsubscribe if we are currently subscribed. - w.subscribedMu.Lock() - if w.subscribed { - w.cs.Unsubscribe(w) - w.tpool.Unsubscribe(w) - w.subscribed = false - } - w.subscribedMu.Unlock() - - w.mu.Lock() - defer w.mu.Unlock() - - err := dbReset(w.dbTx) - if err != nil { - return err - } - err = w.syncDB() - if err != nil { - return err - } - w.wipeSecrets() - w.keys = make(map[types.Address]spendableKey) - w.lookahead = make(map[types.Address]uint64) - w.seeds = []modules.Seed{} - w.unconfirmedProcessedTransactions = processedTransactionList{} - w.unlocked = false - w.encrypted = false - - return nil -} - -// InitFromSeed functions like Init, but using a specified seed. Unlike Init, -// the blockchain will be scanned to determine the seed's progress. 
For this -// reason, InitFromSeed should not be called until the blockchain is fully -// synced. -func (w *Wallet) InitFromSeed(masterKey modules.WalletKey, seed modules.Seed) error { - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - - if !w.cs.Synced() { - return errors.New("cannot init from seed until blockchain is synced") - } - - // If masterKey is blank, use the hash of the seed. - var err error - if masterKey == nil { - h := types.NewHasher() - h.E.Write(seed[:]) - hash := h.Sum() - key := make([]byte, len(hash)) - copy(key[:], hash[:]) - masterKey = modules.WalletKey(key) - } - - if !w.scanLock.TryLock() { - return errScanInProgress - } - defer w.scanLock.Unlock() - - // Estimate the primarySeedProgress by scanning the blockchain. - s := newSeedScanner(seed, w.log) - if err := s.scan(w.cs, w.tg.StopChan()); err != nil { - return err - } - // NOTE: each time the wallet generates a key for index n, it sets its - // progress to n+1, so the progress should be the largest index seen + 1. - // We also add 10% as a buffer because the seed may have addresses in the - // wild that have not appeared in the blockchain yet. - progress := s.largestIndexSeen + 1 - progress += progress / 10 - w.log.Printf("INFO: found key index %v in blockchain. Setting primary seed progress to %v", s.largestIndexSeen, progress) - - // Initialize the wallet with the appropriate seed progress. - w.mu.Lock() - defer w.mu.Unlock() - _, err = w.initEncryption(masterKey, seed, progress) - return err -} - -// Unlocked indicates whether the wallet is locked or unlocked. -func (w *Wallet) Unlocked() (bool, error) { - if err := w.tg.Add(); err != nil { - return false, err - } - defer w.tg.Done() - w.mu.RLock() - defer w.mu.RUnlock() - return w.unlocked, nil -} - -// Lock will erase all keys from memory and prevent the wallet from spending -// coins until it is unlocked. 
-func (w *Wallet) Lock() error { - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - return w.managedLock() -} - -// ChangeKey changes the wallet's encryption key from masterKey to newKey. -func (w *Wallet) ChangeKey(masterKey modules.WalletKey, newKey modules.WalletKey) error { - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - - return w.managedChangeKey(masterKey, newKey) -} - -// ChangeKeyWithSeed is the same as ChangeKey but uses the primary seed -// instead of the current masterKey. -func (w *Wallet) ChangeKeyWithSeed(seed modules.Seed, newKey modules.WalletKey) error { - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - mk, err := w.managedMasterKey(seed) - if err != nil { - return modules.AddContext(err, "failed to retrieve masterkey by seed") - } - return w.managedChangeKey(mk, newKey) -} - -// IsMasterKey verifies that the masterKey is the key used to encrypt the -// wallet. -func (w *Wallet) IsMasterKey(masterKey modules.WalletKey) (bool, error) { - if err := w.tg.Add(); err != nil { - return false, err - } - defer w.tg.Done() - w.mu.Lock() - defer w.mu.Unlock() - - // Check provided key. - err := checkMasterKey(w.dbTx, masterKey) - if modules.ContainsError(err, modules.ErrBadEncryptionKey) { - return false, nil - } - if err != nil { - return false, err - } - return true, nil -} - -// UnlockAsync will decrypt the wallet seed and load all of the addresses into -// memory. -func (w *Wallet) UnlockAsync(masterKey modules.WalletKey) <-chan error { - errChan := make(chan error, 1) - defer close(errChan) - // By having the wallet's ThreadGroup track the Unlock method, we ensure - // that Unlock will never unlock the wallet once the ThreadGroup has been - // stopped. 
Without this precaution, the wallet's Close method would be - // unsafe because it would theoretically be possible for another function - // to Unlock the wallet in the short interval after Close calls w.Lock - // and before Close calls w.mu.Lock. - if err := w.tg.Add(); err != nil { - errChan <- err - return errChan - } - defer w.tg.Done() - - if !w.scanLock.TryLock() { - errChan <- errScanInProgress - return errChan - } - defer w.scanLock.Unlock() - - w.log.Println("INFO: unlocking wallet.") - - // Initialize all of the keys in the wallet under a lock. While holding the - // lock, also grab the subscriber status. - return w.managedUnlock(masterKey) -} - -// Unlock will decrypt the wallet seed and load all of the addresses into -// memory. -func (w *Wallet) Unlock(masterKey modules.WalletKey) error { - return <-w.UnlockAsync(masterKey) -} - -// managedChangeKey safely performs the database operations required to change -// the wallet's encryption key. -func (w *Wallet) managedChangeKey(masterKey modules.WalletKey, newKey modules.WalletKey) error { - w.mu.Lock() - encrypted := w.encrypted - w.mu.Unlock() - if !encrypted { - return errUnencryptedWallet - } - - // Grab the current seeds. - var encryptedPrimarySeed encryptedSeed - var encryptedAuxiliarySeeds []encryptedSeed - var unseededKeys []encryptedSpendableKey - - err := func() error { - w.mu.Lock() - defer w.mu.Unlock() - - // Verify masterKey. - err := checkMasterKey(w.dbTx, masterKey) - if err != nil { - return modules.AddContext(err, "unable to verify master key") - } - - // encryptedPrimarySeed. - encryptedPrimarySeed, err = dbGetPrimarySeed(w.dbTx) - if err != nil { - return modules.AddContext(err, "unable to decode primary seed file") - } - - // encryptedAuxiliarySeeds. - encryptedAuxiliarySeeds, err = dbGetAuxiliarySeeds(w.dbTx) - if err != nil { - return modules.AddContext(err, "unable to decode auxiliary seed file") - } - - // unseededKeys. 
- unseededKeys, err = dbGetUnseededKeys(w.dbTx) - if err != nil { - return modules.AddContext(err, "unable to decode unseeded key file") - } - - return nil - }() - if err != nil { - return err - } - - // Decrypt key files. - var primarySeed modules.Seed - var auxiliarySeeds []modules.Seed - var spendableKeys []spendableKey - - primarySeed, err = decryptSeed(masterKey, encryptedPrimarySeed) - if err != nil { - return modules.AddContext(err, "unable to decrypt primary seed file") - } - for _, as := range encryptedAuxiliarySeeds { - auxSeed, err := decryptSeed(masterKey, as) - if err != nil { - return modules.AddContext(err, "unable to decrypt auxiliary seed file") - } - auxiliarySeeds = append(auxiliarySeeds, auxSeed) - } - for _, uk := range unseededKeys { - sk, err := decryptSpendableKey(masterKey, uk) - if err != nil { - return modules.AddContext(err, "unable to decrypt unseed key file") - } - spendableKeys = append(spendableKeys, sk) - } - - // Encrypt new keys using newKey. - var newPrimarySeed encryptedSeed - var newAuxiliarySeeds []encryptedSeed - var newUnseededKeys []encryptedSpendableKey - - newPrimarySeed = createSeed(newKey, primarySeed) - for _, seed := range auxiliarySeeds { - as := createSeed(newKey, seed) - newAuxiliarySeeds = append(newAuxiliarySeeds, as) - } - for _, sk := range spendableKeys { - var uk encryptedSpendableKey - frand.Read(uk.Salt[:]) - encryptionKey := saltedEncryptionKey(newKey, uk.Salt) - uk.EncryptionVerification, err = modules.Encrypt(encryptionKey, verificationPlaintext) - if err != nil { - return modules.AddContext(err, "failed to encrypt verification") - } - - // Encrypt and save the key. - var buf bytes.Buffer - e := types.NewEncoder(&buf) - sk.EncodeTo(e) - e.Flush() - uk.SpendableKey, err = modules.Encrypt(encryptionKey, buf.Bytes()) - if err != nil { - return modules.AddContext(err, "failed to encrypt unseeded key") - } - newUnseededKeys = append(newUnseededKeys, uk) - } - - // Put the newly encrypted keys in the database. 
- err = func() error { - w.mu.Lock() - defer w.mu.Unlock() - - err = dbPutPrimarySeed(w.dbTx, newPrimarySeed) - if err != nil { - return modules.AddContext(err, "unable to put primary key into db") - } - err = dbPutAuxiliarySeeds(w.dbTx, newAuxiliarySeeds) - if err != nil { - return modules.AddContext(err, "unable to put auxiliary key into db") - } - err = dbPutUnseededKeys(w.dbTx, newUnseededKeys) - if err != nil { - return modules.AddContext(err, "unable to put unseeded key into db") - } - - wpk := walletPasswordEncryptionKey(primarySeed, dbGetWalletSalt(w.dbTx)) - encrypted, err := modules.Encrypt(wpk, newKey) - if err != nil { - return modules.AddContext(err, "unable to encrypt wallet password") - } - err = dbPutWalletPassword(w.dbTx, encrypted) - if err != nil { - return modules.AddContext(err, "unable to put wallet password into db") - } - - return nil - }() - if err != nil { - return err - } - - return nil -} - -// managedLock will erase all keys from memory and prevent the wallet from -// spending coins until it is unlocked. -func (w *Wallet) managedLock() error { - w.mu.Lock() - defer w.mu.Unlock() - if !w.unlocked { - return modules.ErrLockedWallet - } - w.log.Println("INFO: locking wallet.") - - // Wipe all of the seeds and secret keys. They will be replaced upon - // calling 'Unlock' again. Note that since the public keys are not wiped, - // we can continue processing blocks. - w.wipeSecrets() - w.unlocked = false - return nil -} - -// managedUnlocked indicates whether the wallet is locked or unlocked. -func (w *Wallet) managedUnlocked() bool { - w.mu.RLock() - defer w.mu.RUnlock() - return w.unlocked -} diff --git a/modules/wallet/event.go b/modules/wallet/event.go new file mode 100644 index 0000000..87b3c18 --- /dev/null +++ b/modules/wallet/event.go @@ -0,0 +1,591 @@ +package wallet + +import ( + "fmt" + "time" + + "github.com/mike76-dev/sia-satellite/modules" + "go.sia.tech/core/consensus" + "go.sia.tech/core/types" +) + +// Event type constants. 
+const ( + EventTypeTransaction = "transaction" + EventTypeMinerPayout = "miner payout" + EventTypeMissedFileContract = "missed file contract" +) + +// Annotate annotates a txpool transaction. +func Annotate(txn types.Transaction, ownsAddress func(types.Address) bool) modules.PoolTransaction { + ptxn := modules.PoolTransaction{ID: txn.ID(), Raw: txn, Type: "unknown"} + + var totalValue types.Currency + for _, sco := range txn.SiacoinOutputs { + totalValue = totalValue.Add(sco.Value) + } + for _, fc := range txn.FileContracts { + totalValue = totalValue.Add(fc.Payout) + } + for _, fee := range txn.MinerFees { + totalValue = totalValue.Add(fee) + } + + var ownedIn, ownedOut int + for _, sci := range txn.SiacoinInputs { + if ownsAddress(sci.UnlockConditions.UnlockHash()) { + ownedIn++ + } + } + for _, sco := range txn.SiacoinOutputs { + if ownsAddress(sco.Address) { + ownedOut++ + } + } + var ins, outs string + switch { + case ownedIn == 0: + ins = "none" + case ownedIn < len(txn.SiacoinInputs): + ins = "some" + case ownedIn == len(txn.SiacoinInputs): + ins = "all" + } + switch { + case ownedOut == 0: + outs = "none" + case ownedOut < len(txn.SiacoinOutputs): + outs = "some" + case ownedOut == len(txn.SiacoinOutputs): + outs = "all" + } + + switch { + case ins == "none" && outs == "none": + ptxn.Type = "unrelated" + case ins == "all": + ptxn.Sent = totalValue + switch { + case outs == "all": + ptxn.Type = "redistribution" + case len(txn.FileContractRevisions) > 0: + ptxn.Type = "contract revision" + case len(txn.StorageProofs) > 0: + ptxn.Type = "storage proof" + case len(txn.ArbitraryData) > 0: + ptxn.Type = "announcement" + default: + ptxn.Type = "send" + } + case ins == "none" && outs != "none": + ptxn.Type = "receive" + for _, sco := range txn.SiacoinOutputs { + if ownsAddress(sco.Address) { + ptxn.Received = ptxn.Received.Add(sco.Value) + } + } + case ins == "some" && len(txn.FileContracts) > 0: + ptxn.Type = "contract" + for _, fc := range txn.FileContracts { + 
var validLocked, missedLocked types.Currency + for _, sco := range fc.ValidProofOutputs { + if ownsAddress(sco.Address) { + validLocked = validLocked.Add(fc.Payout) + } + } + for _, sco := range fc.MissedProofOutputs { + if ownsAddress(sco.Address) { + missedLocked = missedLocked.Add(fc.Payout) + } + } + if validLocked.Cmp(missedLocked) > 0 { + ptxn.Locked = ptxn.Locked.Add(validLocked) + } else { + ptxn.Locked = ptxn.Locked.Add(missedLocked) + } + } + } + + return ptxn +} + +// An Event is something interesting that happened on the Sia blockchain. +type Event struct { + Index types.ChainIndex + Timestamp time.Time + Relevant []types.Address + Val interface{ EventType() string } +} + +// EventType implements Event. +func (*EventTransaction) EventType() string { return EventTypeTransaction } + +// EventType implements Event. +func (*EventMinerPayout) EventType() string { return EventTypeMinerPayout } + +// EventType implements Event. +func (*EventMissedFileContract) EventType() string { return EventTypeMissedFileContract } + +// String implements fmt.Stringer. +func (e *Event) String() string { + return fmt.Sprintf("%s at %s: %s", e.Val.EventType(), e.Timestamp, e.Val) +} + +// A HostAnnouncement represents a host announcement within an EventTransaction. +type HostAnnouncement struct { + PublicKey types.PublicKey `json:"publicKey"` + NetAddress string `json:"netAddress"` +} + +// A SiafundInput represents a siafund input within an EventTransaction. +type SiafundInput struct { + SiafundElement types.SiafundElement `json:"siafundElement"` + ClaimElement types.SiacoinElement `json:"claimElement"` +} + +// A FileContract represents a file contract within an EventTransaction. 
+type FileContract struct { + FileContract types.FileContractElement `json:"fileContract"` + // only non-nil if transaction revised contract + Revision *types.FileContract `json:"revision,omitempty"` + // only non-nil if transaction resolved contract + ValidOutputs []types.SiacoinElement `json:"validOutputs,omitempty"` +} + +// A V2FileContract represents a v2 file contract within an EventTransaction. +type V2FileContract struct { + FileContract types.V2FileContractElement `json:"fileContract"` + // only non-nil if transaction revised contract + Revision *types.V2FileContract `json:"revision,omitempty"` + // only non-nil if transaction resolved contract + Resolution types.V2FileContractResolutionType `json:"resolution,omitempty"` + Outputs []types.SiacoinElement `json:"outputs,omitempty"` +} + +// An EventTransaction represents a transaction that affects the wallet. +type EventTransaction struct { + ID types.TransactionID `json:"id"` + SiacoinInputs []types.SiacoinElement `json:"siacoinInputs"` + SiacoinOutputs []types.SiacoinElement `json:"siacoinOutputs"` + SiafundInputs []SiafundInput `json:"siafundInputs"` + SiafundOutputs []types.SiafundElement `json:"siafundOutputs"` + FileContracts []FileContract `json:"fileContracts"` + V2FileContracts []V2FileContract `json:"v2FileContracts"` + HostAnnouncements []HostAnnouncement `json:"hostAnnouncements"` + Fee types.Currency `json:"fee"` +} + +// An EventMinerPayout represents a miner payout from a block. +type EventMinerPayout struct { + SiacoinOutput types.SiacoinElement `json:"siacoinOutput"` +} + +// An EventMissedFileContract represents a file contract that has expired +// without a storage proof +type EventMissedFileContract struct { + FileContract types.FileContractElement `json:"fileContract"` + MissedOutputs []types.SiacoinElement `json:"missedOutputs"` +} + +// String implements fmt.Stringer. 
+func (et *EventTransaction) String() string { + result := et.ID.String() + if len(et.SiacoinOutputs) > 0 { + result += ": Siacoin outputs: " + } + for i, sco := range et.SiacoinOutputs { + result += sco.SiacoinOutput.Address.String() + result += fmt.Sprintf(" (%s)", sco.SiacoinOutput.Value) + if i < len(et.SiacoinOutputs)-1 { + result += ", " + } + } + if len(et.SiafundOutputs) > 0 { + result += "; Siafund outputs: " + } + for i, sfo := range et.SiafundOutputs { + result += sfo.SiafundOutput.Address.String() + result += fmt.Sprintf(" (%d SF)", sfo.SiafundOutput.Value) + if i < len(et.SiafundOutputs)-1 { + result += ", " + } + } + return result +} + +// String implements fmt.Stringer. +func (emp *EventMinerPayout) String() string { + return fmt.Sprintf("%s (%s)", + emp.SiacoinOutput.SiacoinOutput.Address.String(), + emp.SiacoinOutput.SiacoinOutput.Value, + ) +} + +// String implements fmt.Stringer. +func (emfc *EventMissedFileContract) String() string { + return emfc.FileContract.ID.String() +} + +// A ChainUpdate is a set of changes to the consensus state. +type ChainUpdate interface { + ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool)) + ForEachSiafundElement(func(sfe types.SiafundElement, spent bool)) + ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool)) + ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType)) +} + +// AppliedEvents extracts a list of relevant events from a chain update. +func AppliedEvents(cs consensus.State, b types.Block, cu ChainUpdate, relevant func(types.Address) bool) []Event { + var events []Event + addEvent := func(v interface{ EventType() string }, relevant []types.Address) { + // Dedup relevant addresses. 
+ seen := make(map[types.Address]bool) + unique := relevant[:0] + for _, addr := range relevant { + if !seen[addr] { + unique = append(unique, addr) + seen[addr] = true + } + } + + events = append(events, Event{ + Timestamp: b.Timestamp, + Index: cs.Index, + Relevant: unique, + Val: v, + }) + } + + // Do a first pass to see if there's anything relevant in the block. + relevantContract := func(fc types.FileContract) (addrs []types.Address) { + for _, sco := range fc.ValidProofOutputs { + if relevant(sco.Address) { + addrs = append(addrs, sco.Address) + } + } + for _, sco := range fc.MissedProofOutputs { + if relevant(sco.Address) { + addrs = append(addrs, sco.Address) + } + } + return + } + relevantV2Contract := func(fc types.V2FileContract) (addrs []types.Address) { + if relevant(fc.RenterOutput.Address) { + addrs = append(addrs, fc.RenterOutput.Address) + } + if relevant(fc.HostOutput.Address) { + addrs = append(addrs, fc.HostOutput.Address) + } + return + } + relevantV2ContractResolution := func(res types.V2FileContractResolutionType) (addrs []types.Address) { + switch r := res.(type) { + case *types.V2FileContractFinalization: + return relevantV2Contract(types.V2FileContract(*r)) + case *types.V2FileContractRenewal: + return relevantV2Contract(r.FinalRevision) + } + return + } + anythingRelevant := func() (ok bool) { + cu.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + if ok || relevant(sce.SiacoinOutput.Address) { + ok = true + } + }) + cu.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + if ok || relevant(sfe.SiafundOutput.Address) { + ok = true + } + }) + cu.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { + if ok || len(relevantContract(fce.FileContract)) > 0 || (rev != nil && len(relevantContract(rev.FileContract)) > 0) { + ok = true + } + }) + cu.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, 
res types.V2FileContractResolutionType) { + if ok || + len(relevantV2Contract(fce.V2FileContract)) > 0 || + (rev != nil && len(relevantV2Contract(rev.V2FileContract)) > 0) || + (res != nil && len(relevantV2ContractResolution(res)) > 0) { + ok = true + } + }) + return + }() + if !anythingRelevant { + return nil + } + + // Collect all elements. + sces := make(map[types.SiacoinOutputID]types.SiacoinElement) + sfes := make(map[types.SiafundOutputID]types.SiafundElement) + fces := make(map[types.FileContractID]types.FileContractElement) + v2fces := make(map[types.FileContractID]types.V2FileContractElement) + cu.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + sce.MerkleProof = nil + sces[types.SiacoinOutputID(sce.ID)] = sce + }) + cu.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + sfe.MerkleProof = nil + sfes[types.SiafundOutputID(sfe.ID)] = sfe + }) + cu.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { + fce.MerkleProof = nil + fces[types.FileContractID(fce.ID)] = fce + }) + cu.ForEachV2FileContractElement(func(fce types.V2FileContractElement, rev *types.V2FileContractElement, res types.V2FileContractResolutionType) { + fce.MerkleProof = nil + v2fces[types.FileContractID(fce.ID)] = fce + }) + + relevantTxn := func(txn types.Transaction) (addrs []types.Address) { + for _, sci := range txn.SiacoinInputs { + if sce := sces[sci.ParentID]; relevant(sce.SiacoinOutput.Address) { + addrs = append(addrs, sce.SiacoinOutput.Address) + } + } + for _, sco := range txn.SiacoinOutputs { + if relevant(sco.Address) { + addrs = append(addrs, sco.Address) + } + } + for _, sfi := range txn.SiafundInputs { + if sfe := sfes[sfi.ParentID]; relevant(sfe.SiafundOutput.Address) { + addrs = append(addrs, sfe.SiafundOutput.Address) + } + } + for _, sfo := range txn.SiafundOutputs { + if relevant(sfo.Address) { + addrs = append(addrs, sfo.Address) + } + } + for _, fc := range 
txn.FileContracts { + addrs = append(addrs, relevantContract(fc)...) + } + for _, fcr := range txn.FileContractRevisions { + addrs = append(addrs, relevantContract(fcr.FileContract)...) + } + for _, sp := range txn.StorageProofs { + addrs = append(addrs, relevantContract(fces[sp.ParentID].FileContract)...) + } + return + } + + relevantV2Txn := func(txn types.V2Transaction) (addrs []types.Address) { + for _, sci := range txn.SiacoinInputs { + if relevant(sci.Parent.SiacoinOutput.Address) { + addrs = append(addrs, sci.Parent.SiacoinOutput.Address) + } + } + for _, sco := range txn.SiacoinOutputs { + if relevant(sco.Address) { + addrs = append(addrs, sco.Address) + } + } + for _, sfi := range txn.SiafundInputs { + if relevant(sfi.Parent.SiafundOutput.Address) { + addrs = append(addrs, sfi.Parent.SiafundOutput.Address) + } + } + for _, sfo := range txn.SiafundOutputs { + if relevant(sfo.Address) { + addrs = append(addrs, sfo.Address) + } + } + for _, fc := range txn.FileContracts { + addrs = append(addrs, relevantV2Contract(fc)...) + } + for _, fcr := range txn.FileContractRevisions { + addrs = append(addrs, relevantV2Contract(fcr.Parent.V2FileContract)...) + addrs = append(addrs, relevantV2Contract(fcr.Revision)...) + } + for _, fcr := range txn.FileContractResolutions { + addrs = append(addrs, relevantV2Contract(fcr.Parent.V2FileContract)...) + switch r := fcr.Resolution.(type) { + case *types.V2FileContractFinalization: + addrs = append(addrs, relevantV2Contract(types.V2FileContract(*r))...) + case *types.V2FileContractRenewal: + addrs = append(addrs, relevantV2Contract(r.FinalRevision)...) + } + } + return + } + + // Handle v1 transactions. 
+ for _, txn := range b.Transactions { + relevant := relevantTxn(txn) + if len(relevant) == 0 { + continue + } + + e := &EventTransaction{ + ID: txn.ID(), + SiacoinInputs: make([]types.SiacoinElement, len(txn.SiacoinInputs)), + SiacoinOutputs: make([]types.SiacoinElement, len(txn.SiacoinOutputs)), + SiafundInputs: make([]SiafundInput, len(txn.SiafundInputs)), + SiafundOutputs: make([]types.SiafundElement, len(txn.SiafundOutputs)), + } + + for i := range txn.SiacoinInputs { + e.SiacoinInputs[i] = sces[txn.SiacoinInputs[i].ParentID] + } + for i := range txn.SiacoinOutputs { + e.SiacoinOutputs[i] = sces[txn.SiacoinOutputID(i)] + } + for i := range txn.SiafundInputs { + e.SiafundInputs[i] = SiafundInput{ + SiafundElement: sfes[txn.SiafundInputs[i].ParentID], + ClaimElement: sces[txn.SiafundClaimOutputID(i)], + } + } + for i := range txn.SiafundOutputs { + e.SiafundOutputs[i] = sfes[txn.SiafundOutputID(i)] + } + addContract := func(id types.FileContractID) *FileContract { + for i := range e.FileContracts { + if types.FileContractID(e.FileContracts[i].FileContract.ID) == id { + return &e.FileContracts[i] + } + } + e.FileContracts = append(e.FileContracts, FileContract{FileContract: fces[id]}) + return &e.FileContracts[len(e.FileContracts)-1] + } + for i := range txn.FileContracts { + addContract(txn.FileContractID(i)) + } + for i := range txn.FileContractRevisions { + fc := addContract(txn.FileContractRevisions[i].ParentID) + rev := txn.FileContractRevisions[i].FileContract + fc.Revision = &rev + } + for i := range txn.StorageProofs { + fc := addContract(txn.StorageProofs[i].ParentID) + fc.ValidOutputs = make([]types.SiacoinElement, len(fc.FileContract.FileContract.ValidProofOutputs)) + for i := range fc.ValidOutputs { + fc.ValidOutputs[i] = sces[types.FileContractID(fc.FileContract.ID).ValidOutputID(i)] + } + } + for _, arb := range txn.ArbitraryData { + var prefix types.Specifier + var uk types.UnlockKey + d := types.NewBufDecoder(arb) + prefix.DecodeFrom(d) + 
netAddress := d.ReadString() + uk.DecodeFrom(d) + if d.Err() == nil && prefix == types.NewSpecifier("HostAnnouncement") && + uk.Algorithm == types.SpecifierEd25519 && len(uk.Key) == len(types.PublicKey{}) { + e.HostAnnouncements = append(e.HostAnnouncements, HostAnnouncement{ + PublicKey: *(*types.PublicKey)(uk.Key), + NetAddress: netAddress, + }) + } + } + for i := range txn.MinerFees { + e.Fee = e.Fee.Add(txn.MinerFees[i]) + } + + addEvent(e, relevant) + } + + // Handle v2 transactions. + for _, txn := range b.V2Transactions() { + relevant := relevantV2Txn(txn) + if len(relevant) == 0 { + continue + } + + txid := txn.ID() + e := &EventTransaction{ + ID: txid, + SiacoinInputs: make([]types.SiacoinElement, len(txn.SiacoinInputs)), + SiacoinOutputs: make([]types.SiacoinElement, len(txn.SiacoinOutputs)), + SiafundInputs: make([]SiafundInput, len(txn.SiafundInputs)), + SiafundOutputs: make([]types.SiafundElement, len(txn.SiafundOutputs)), + } + for i := range txn.SiacoinInputs { + // NOTE: here (and elsewhere), we fetch the element from our maps, + // rather than using the parent directly, because our copy has its + // Merkle proof nil'd out. 
+ e.SiacoinInputs[i] = sces[types.SiacoinOutputID(txn.SiacoinInputs[i].Parent.ID)] + } + for i := range txn.SiacoinOutputs { + e.SiacoinOutputs[i] = sces[txn.SiacoinOutputID(txid, i)] + } + for i := range txn.SiafundInputs { + sfoid := types.SiafundOutputID(txn.SiafundInputs[i].Parent.ID) + e.SiafundInputs[i] = SiafundInput{ + SiafundElement: sfes[sfoid], + ClaimElement: sces[sfoid.ClaimOutputID()], + } + } + for i := range txn.SiafundOutputs { + e.SiafundOutputs[i] = sfes[txn.SiafundOutputID(txid, i)] + } + addContract := func(id types.FileContractID) *V2FileContract { + for i := range e.V2FileContracts { + if types.FileContractID(e.V2FileContracts[i].FileContract.ID) == id { + return &e.V2FileContracts[i] + } + } + e.V2FileContracts = append(e.V2FileContracts, V2FileContract{FileContract: v2fces[id]}) + return &e.V2FileContracts[len(e.V2FileContracts)-1] + } + for i := range txn.FileContracts { + addContract(txn.V2FileContractID(txid, i)) + } + for _, fcr := range txn.FileContractRevisions { + fc := addContract(types.FileContractID(fcr.Parent.ID)) + fc.Revision = &fcr.Revision + } + for _, fcr := range txn.FileContractResolutions { + fc := addContract(types.FileContractID(fcr.Parent.ID)) + fc.Resolution = fcr.Resolution + fc.Outputs = []types.SiacoinElement{ + sces[types.FileContractID(fcr.Parent.ID).V2RenterOutputID()], + sces[types.FileContractID(fcr.Parent.ID).V2HostOutputID()], + } + } + for _, a := range txn.Attestations { + if a.Key == "HostAnnouncement" { + e.HostAnnouncements = append(e.HostAnnouncements, HostAnnouncement{ + PublicKey: a.PublicKey, + NetAddress: string(a.Value), + }) + } + } + + e.Fee = txn.MinerFee + addEvent(e, relevant) + } + + // Handle missed contracts. 
+ cu.ForEachFileContractElement(func(fce types.FileContractElement, rev *types.FileContractElement, resolved, valid bool) { + if resolved && !valid { + relevant := relevantContract(fce.FileContract) + if len(relevant) == 0 { + return + } + missedOutputs := make([]types.SiacoinElement, len(fce.FileContract.MissedProofOutputs)) + for i := range missedOutputs { + missedOutputs[i] = sces[types.FileContractID(fce.ID).MissedOutputID(i)] + } + addEvent(&EventMissedFileContract{ + FileContract: fce, + MissedOutputs: missedOutputs, + }, relevant) + } + }) + + // Handle block rewards. + for i := range b.MinerPayouts { + if relevant(b.MinerPayouts[i].Address) { + addEvent(&EventMinerPayout{ + SiacoinOutput: sces[cs.Index.ID.MinerOutputID(i)], + }, []types.Address{b.MinerPayouts[i].Address}) + } + } + + return events +} diff --git a/modules/wallet/money.go b/modules/wallet/money.go index 0cc1ec8..118629f 100644 --- a/modules/wallet/money.go +++ b/modules/wallet/money.go @@ -2,16 +2,16 @@ package wallet import ( "errors" + "fmt" + "sort" + "time" "github.com/mike76-dev/sia-satellite/modules" - + "go.sia.tech/core/consensus" "go.sia.tech/core/types" + "go.uber.org/zap" ) -// estimatedTransactionSize is the estimated size of a transaction used to send -// siacoins. -const estimatedTransactionSize = 750 - // sortedOutputs is a struct containing a slice of siacoin outputs and their // corresponding ids. sortedOutputs can be sorted using the sort package. type sortedOutputs struct { @@ -19,282 +19,331 @@ type sortedOutputs struct { outputs []types.SiacoinOutput } -// DustThreshold returns the quantity per byte below which a Currency is -// considered to be Dust. -func (w *Wallet) DustThreshold() (types.Currency, error) { - if err := w.tg.Add(); err != nil { - return types.Currency{}, modules.ErrWalletShutdown - } - defer w.tg.Done() +// Len returns the number of elements in the sortedOutputs struct. 
+func (so sortedOutputs) Len() int { + return len(so.ids) +} - minFee, _ := w.tpool.FeeEstimation() - return minFee.Mul64(3), nil +// Less returns whether element 'i' is less than element 'j'. The currency +// value of each output is used for comparison. +func (so sortedOutputs) Less(i, j int) bool { + return so.outputs[i].Value.Cmp(so.outputs[j].Value) < 0 } -// ConfirmedBalance returns the balance of the wallet according to all of the -// confirmed transactions. -func (w *Wallet) ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance uint64, siafundClaimBalance types.Currency, err error) { - if err := w.tg.Add(); err != nil { - return types.ZeroCurrency, 0, types.ZeroCurrency, modules.ErrWalletShutdown - } - defer w.tg.Done() +// Swap swaps two elements in the sortedOutputs set. +func (so sortedOutputs) Swap(i, j int) { + so.ids[i], so.ids[j] = so.ids[j], so.ids[i] + so.outputs[i], so.outputs[j] = so.outputs[j], so.outputs[i] +} - // dustThreshold has to be obtained separate from the lock. - dustThreshold, err := w.DustThreshold() - if err != nil { - return types.ZeroCurrency, 0, types.ZeroCurrency, modules.ErrWalletShutdown - } +// DustThreshold returns the quantity per byte below which a Currency is +// considered to be Dust. +func (w *Wallet) DustThreshold() types.Currency { + return w.cm.RecommendedFee().Mul64(3) +} +// ConfirmedBalance returns the total balance of the wallet. +func (w *Wallet) ConfirmedBalance() (siacoins, immatureSiacoins types.Currency, siafunds uint64) { w.mu.Lock() defer w.mu.Unlock() - // Ensure durability of reported balance. 
- if err = w.syncDB(); err != nil { - return - } - - dbForEachSiacoinOutput(w.dbTx, func(_ types.SiacoinOutputID, sco types.SiacoinOutput) { - if sco.Value.Cmp(dustThreshold) > 0 { - siacoinBalance = siacoinBalance.Add(sco.Value) + dustThreshold := w.DustThreshold() + height := w.cm.Tip().Height + for _, sce := range w.sces { + if sce.SiacoinOutput.Value.Cmp(dustThreshold) > 0 { + if height >= sce.MaturityHeight { + siacoins = siacoins.Add(sce.SiacoinOutput.Value) + } else { + immatureSiacoins = immatureSiacoins.Add(sce.SiacoinOutput.Value) + } } - }) - - siafundPool, err := dbGetSiafundPool(w.dbTx) - if err != nil { - return } - dbForEachSiafundOutput(w.dbTx, func(_ types.SiafundOutputID, sfo types.SiafundOutput, claimStart types.Currency) { - siafundBalance = siafundBalance + sfo.Value - if claimStart.Cmp(siafundPool) > 0 { - // Skip claims larger than the Siafund pool. This should only - // occur if the Siafund pool has not been initialized yet. - return - } - siafundClaimBalance = siafundClaimBalance.Add(siafundPool.Sub(claimStart).Mul64(sfo.Value).Div64(modules.SiafundCount)) - }) - return -} -// UnconfirmedBalance returns the number of outgoing and incoming Siacoins in -// the unconfirmed transaction set. Refund outputs are included in this -// reporting. -func (w *Wallet) UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency, err error) { - if err := w.tg.Add(); err != nil { - return types.ZeroCurrency, types.ZeroCurrency, modules.ErrWalletShutdown + for _, sfe := range w.sfes { + siafunds += sfe.SiafundOutput.Value } - defer w.tg.Done() - // dustThreshold has to be obtained separate from the lock. - dustThreshold, err := w.DustThreshold() - if err != nil { - return types.ZeroCurrency, types.ZeroCurrency, modules.ErrWalletShutdown - } + return +} +// UnconfirmedBalance returns the balance of the wallet contained in +// the unconfirmed transactions. 
+func (w *Wallet) UnconfirmedBalance() (outgoing, incoming types.Currency) { w.mu.Lock() defer w.mu.Unlock() - curr := w.unconfirmedProcessedTransactions.head - for curr != nil { - pt := curr.txn - for _, input := range pt.Inputs { - if input.FundType == specifierSiacoinInput && input.WalletAddress { - outgoingSiacoins = outgoingSiacoins.Add(input.Value) + dustThreshold := w.DustThreshold() + ids := make(map[types.SiacoinOutputID]types.Currency) + for _, sce := range w.sces { + ids[types.SiacoinOutputID(sce.ID)] = sce.SiacoinOutput.Value + } + + txns := w.cm.PoolTransactions() + for _, txn := range txns { + for _, sci := range txn.SiacoinInputs { + if value, exists := ids[sci.ParentID]; exists { + outgoing = outgoing.Add(value) } } - for _, output := range pt.Outputs { - if output.FundType == types.SpecifierSiacoinOutput && output.WalletAddress && output.Value.Cmp(dustThreshold) > 0 { - incomingSiacoins = incomingSiacoins.Add(output.Value) + for _, sco := range txn.SiacoinOutputs { + if _, exists := w.addrs[sco.Address]; exists && sco.Value.Cmp(dustThreshold) > 0 { + incoming = incoming.Add(sco.Value) } } - curr = curr.next } + return } -// SendSiacoins creates a transaction sending 'amount' to 'dest'. The -// transaction is submitted to the transaction pool and is also returned. Fees -// are added to the amount sent. -func (w *Wallet) SendSiacoins(amount types.Currency, dest types.Address) ([]types.Transaction, error) { - if err := w.tg.Add(); err != nil { - err = modules.ErrWalletShutdown - return nil, err +// AddressBalance returns the balance of the given address. 
+func (w *Wallet) AddressBalance(addr types.Address) (siacoins types.Currency, siafunds uint64) { + w.mu.Lock() + defer w.mu.Unlock() + + sce, exists := w.sces[addr] + if exists { + siacoins = sce.SiacoinOutput.Value + } + + sfe, exists := w.sfes[addr] + if exists { + siafunds = sfe.SiafundOutput.Value } - defer w.tg.Done() - _, fee := w.tpool.FeeEstimation() - fee = fee.Mul64(estimatedTransactionSize) - return w.managedSendSiacoins(amount, fee, dest) + return } -// SendSiacoinsFeeIncluded creates a transaction sending 'amount' to 'dest'. The -// transaction is submitted to the transaction pool and is also returned. Fees -// are subtracted from the amount sent. -func (w *Wallet) SendSiacoinsFeeIncluded(amount types.Currency, dest types.Address) ([]types.Transaction, error) { - if err := w.tg.Add(); err != nil { - err = modules.ErrWalletShutdown - return nil, err +// Fund adds Siacoin inputs with the required amount to the transaction. +func (w *Wallet) Fund(txn *types.Transaction, amount types.Currency) (parents []types.Transaction, toSign []types.Hash256, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if amount.IsZero() { + return nil, nil, nil } - defer w.tg.Done() - _, fee := w.tpool.FeeEstimation() - fee = fee.Mul64(estimatedTransactionSize) - // Don't allow sending an amount equal to the fee, as zero spending is not - // allowed and would error out later. - if amount.Cmp(fee) <= 0 { - w.log.Println("ERROR: attempt to send coins has failed - not enough to cover fee") - return nil, modules.AddContext(modules.ErrLowBalance, "not enough coins to cover fee") + var utxos []types.SiacoinElement + for _, sce := range w.sces { + utxos = append(utxos, sce) } - return w.managedSendSiacoins(amount.Sub(fee), fee, dest) -} + sort.Slice(utxos, func(i, j int) bool { + return utxos[i].SiacoinOutput.Value.Cmp(utxos[j].SiacoinOutput.Value) > 0 + }) -// managedSendSiacoins creates a transaction sending 'amount' to 'dest'. 
The -// transaction is submitted to the transaction pool and is also returned. -func (w *Wallet) managedSendSiacoins(amount, fee types.Currency, dest types.Address) (txns []types.Transaction, err error) { - // Check if consensus is synced. - if !w.cs.Synced() { - return nil, errors.New("cannot send Siacoin until fully synced") + inPool := make(map[types.SiacoinOutputID]bool) + for _, ptxn := range w.cm.PoolTransactions() { + for _, in := range ptxn.SiacoinInputs { + inPool[in.ParentID] = true + } } - w.mu.RLock() - unlocked := w.unlocked - w.mu.RUnlock() - if !unlocked { - w.log.Println("ERROR: attempt to send coins has failed - wallet is locked") - return nil, modules.ErrLockedWallet + var outputSum types.Currency + var fundingElements []types.SiacoinElement + for _, sce := range utxos { + if w.used[types.Hash256(sce.ID)] || inPool[types.SiacoinOutputID(sce.ID)] { + continue + } + fundingElements = append(fundingElements, sce) + outputSum = outputSum.Add(sce.SiacoinOutput.Value) + if outputSum.Cmp(amount) >= 0 { + break + } } - output := types.SiacoinOutput{ - Value: amount, - Address: dest, + if outputSum.Cmp(amount) < 0 { + return nil, nil, modules.ErrInsufficientBalance + } else if outputSum.Cmp(amount) > 0 { + refundUC, err := w.nextAddress() + defer func() { + if err != nil { + w.markAddressUnused(refundUC) + } + }() + if err != nil { + return nil, nil, err + } + txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ + Value: outputSum.Sub(amount), + Address: refundUC.UnlockHash(), + }) } - txn := types.Transaction{ - SiacoinOutputs: []types.SiacoinOutput{output}, - MinerFees: []types.Currency{fee}, + toSign = make([]types.Hash256, len(fundingElements)) + for i, sce := range fundingElements { + if key, ok := w.keys[sce.SiacoinOutput.Address]; ok { + txn.SiacoinInputs = append(txn.SiacoinInputs, types.SiacoinInput{ + ParentID: types.SiacoinOutputID(sce.ID), + UnlockConditions: types.StandardUnlockConditions(key.PublicKey()), + }) + toSign[i] = 
types.Hash256(sce.ID) + if err := w.insertSpentOutput(sce.ID); err != nil { + return nil, nil, err + } + } } - parentTxn, toSign, err := w.FundTransaction(&txn, amount.Add(fee)) - if err != nil { - w.log.Println("ERROR: attempt to send coins has failed - failed to fund transaction:", err) - w.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) - return nil, modules.AddContext(err, "unable to fund transaction") + return w.cm.UnconfirmedParents(*txn), toSign, nil +} + +// Release marks the outputs as unused. +func (w *Wallet) Release(txnSet []types.Transaction) { + w.mu.Lock() + defer w.mu.Unlock() + for _, txn := range txnSet { + for i := range txn.SiacoinOutputs { + if err := w.removeSpentOutput(types.Hash256(txn.SiacoinOutputID(i))); err != nil { + w.log.Error("couldn't remove spent output", zap.Error(err)) + } + } } +} - cf := modules.FullCoveredFields() - err = w.SignTransaction(&txn, toSign, cf) - if err != nil { - w.log.Println("ERROR: attempt to send coins has failed - failed to sign transaction:", err) - w.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) - return nil, modules.AddContext(err, "unable to sign transaction") +// Reserve reserves the given ids for the given duration. +func (w *Wallet) Reserve(ids []types.Hash256, duration time.Duration) error { + w.mu.Lock() + defer w.mu.Unlock() + + // Check if any of the ids are already reserved. + for _, id := range ids { + if w.used[id] { + return fmt.Errorf("output %q already reserved", id) + } } - txnSet := append([]types.Transaction{parentTxn}, txn) - err = w.tpool.AcceptTransactionSet(txnSet) - if err != nil { - w.log.Println("ERROR: attempt to send coins has failed - transaction pool rejected transaction:", err) - w.ReleaseInputs(txnSet) - return nil, modules.AddContext(err, "unable to get transaction accepted") + // Reserve the ids. 
+ for _, id := range ids { + if err := w.insertSpentOutput(id); err != nil { + return err + } } - w.log.Printf("INFO: submitted a Siacoin transfer transaction set for value %v with fees %v, IDs:\n", amount, fee) - for _, txn := range txnSet { - w.log.Println("\t", txn.ID()) + // Sleep for the duration and then unreserve the ids. + time.AfterFunc(duration, func() { + w.mu.Lock() + defer w.mu.Unlock() + + for _, id := range ids { + w.removeSpentOutput(id) + } + }) + return nil +} + +// Sign signs the specified transaction using keys derived from the wallet seed. +// If toSign is nil, SignTransaction will automatically add Signatures for each +// input owned by the seed. If toSign is not nil, it is a list of IDs of Signatures +// already present in txn; SignTransaction will fill in the Signature field of each. +func (w *Wallet) Sign(cs consensus.State, txn *types.Transaction, toSign []types.Hash256) error { + w.mu.Lock() + defer w.mu.Unlock() + + if len(toSign) == 0 { + // Lazy mode: add standard sigs for every input we own. 
+ for _, sci := range txn.SiacoinInputs { + if key, ok := w.keys[sci.UnlockConditions.UnlockHash()]; ok { + txn.Signatures = append(txn.Signatures, StandardTransactionSignature(types.Hash256(sci.ParentID))) + SignTransaction(cs, txn, len(txn.Signatures)-1, key) + } + } + for _, sfi := range txn.SiafundInputs { + if key, ok := w.keys[sfi.UnlockConditions.UnlockHash()]; ok { + txn.Signatures = append(txn.Signatures, StandardTransactionSignature(types.Hash256(sfi.ParentID))) + SignTransaction(cs, txn, len(txn.Signatures)-1, key) + } + } + return nil } - return txnSet, nil + sigAddr := func(id types.Hash256) (types.Address, bool) { + for _, sci := range txn.SiacoinInputs { + if types.Hash256(sci.ParentID) == id { + return sci.UnlockConditions.UnlockHash(), true + } + } + for _, sfi := range txn.SiafundInputs { + if types.Hash256(sfi.ParentID) == id { + return sfi.UnlockConditions.UnlockHash(), true + } + } + for _, fcr := range txn.FileContractRevisions { + if types.Hash256(fcr.ParentID) == id { + return fcr.UnlockConditions.UnlockHash(), true + } + } + return types.Address{}, false + } + +outer: + for _, parent := range toSign { + for sigIndex, sig := range txn.Signatures { + if sig.ParentID == parent { + if addr, ok := sigAddr(parent); !ok { + return fmt.Errorf("ID %v not present in transaction", parent) + } else if key, ok := w.keys[addr]; !ok { + return fmt.Errorf("missing key for ID %v", parent) + } else { + SignTransaction(cs, txn, sigIndex, key) + continue outer + } + } + } + return fmt.Errorf("signature %v not present in transaction", parent) + } + return nil } -// SendSiacoinsMulti creates a transaction that includes the specified -// outputs. The transaction is submitted to the transaction pool and is also -// returned. -func (w *Wallet) SendSiacoinsMulti(outputs []types.SiacoinOutput) (txns []types.Transaction, err error) { +// SendSiacoins creates a transaction sending 'amount' to 'dest'. 
The +// transaction is submitted to the transaction pool and is also returned. Fees +// are added to the amount sent. +func (w *Wallet) SendSiacoins(amount types.Currency, dest types.Address) ([]types.Transaction, error) { if err := w.tg.Add(); err != nil { - err = modules.ErrWalletShutdown return nil, err } defer w.tg.Done() - w.log.Println("INFO: beginning call to SendSiacoinsMulti") - // Check if consensus is synced. - if !w.cs.Synced() { - return nil, errors.New("cannot send Siacoin until fully synced") + if !w.synced() { + return nil, errors.New("cannot send Siacoins until fully synced") } - w.mu.RLock() - unlocked := w.unlocked - w.mu.RUnlock() - if !unlocked { - w.log.Println("ERROR: attempt to send coins has failed - wallet is locked") - return nil, modules.ErrLockedWallet + fee := w.cm.RecommendedFee().Mul64(750) + output := types.SiacoinOutput{ + Value: amount, + Address: dest, } - - // Calculate estimated transaction fee. - _, tpoolFee := w.tpool.FeeEstimation() - // We don't want send-to-many transactions to fail. - tpoolFee = tpoolFee.Mul64(2) - // Estimated transaction size in bytes. - tpoolFee = tpoolFee.Mul64(1000 + 60 * uint64(len(outputs))) - txn := types.Transaction{ - SiacoinOutputs: outputs, - MinerFees: []types.Currency{tpoolFee}, + SiacoinOutputs: []types.SiacoinOutput{output}, + MinerFees: []types.Currency{fee}, } - // Calculate total cost to wallet. - // - // NOTE: we only want to call FundTransaction once; that way, it will - // (ideally) fund the entire transaction with a single input, instead of - // many smaller ones. 
- totalCost := tpoolFee - for _, sco := range outputs { - totalCost = totalCost.Add(sco.Value) - } - parentTxn, toSign, err := w.FundTransaction(&txn, totalCost) + parents, toSign, err := w.Fund(&txn, amount.Add(fee)) if err != nil { - w.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) + w.log.Error("failed to fund transaction", zap.Error(err)) return nil, modules.AddContext(err, "unable to fund transaction") } - cf := modules.FullCoveredFields() - err = w.SignTransaction(&txn, toSign, cf) + for _, id := range toSign { + txn.Signatures = append(txn.Signatures, types.TransactionSignature{ + ParentID: id, + CoveredFields: types.CoveredFields{WholeTransaction: true}, + }) + } + + err = w.Sign(w.cm.TipState(), &txn, toSign) if err != nil { - w.log.Println("ERROR: attempt to send coins has failed - failed to sign transaction:", err) - w.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) + w.log.Error("failed to sign transaction", zap.Error(err)) + w.Release(append(parents, txn)) return nil, modules.AddContext(err, "unable to sign transaction") } - txnSet := append([]types.Transaction{parentTxn}, txn) - w.log.Println("INFO: attempting to broadcast a multi-send over the network") - err = w.tpool.AcceptTransactionSet(txnSet) + txnSet := append(parents, txn) + _, err = w.cm.AddPoolTransactions(txnSet) if err != nil { - w.log.Println("ERROR: attempt to send coins has failed - transaction pool rejected transaction:", err) - w.ReleaseInputs(txnSet) - return nil, modules.AddContext(err, "unable to get transaction accepted") + w.Release(txnSet) + w.log.Error("transaction set rejected", zap.Error(err)) + return nil, modules.AddContext(err, "invalid transaction set") } - // Log the success. 
- var outputList string - for _, output := range outputs { - outputList = outputList + "\n\tAddress: " + output.Address.String() + "\n\tValue: " + output.Value.String() + "\n" - } - w.log.Printf("INFO: successfully broadcast transaction with id %v, fee %v, and the following outputs: %v", txnSet[len(txnSet) - 1].ID(), tpoolFee, outputList) + w.s.BroadcastTransactionSet(txnSet) + w.log.Info("successfully sent amount", zap.Stringer("amount", amount), zap.Stringer("fee", fee), zap.Stringer("destination", dest)) return txnSet, nil } - -// Len returns the number of elements in the sortedOutputs struct. -func (so sortedOutputs) Len() int { - return len(so.ids) -} - -// Less returns whether element 'i' is less than element 'j'. The currency -// value of each output is used for comparison. -func (so sortedOutputs) Less(i, j int) bool { - return so.outputs[i].Value.Cmp(so.outputs[j].Value) < 0 -} - -// Swap swaps two elements in the sortedOutputs set. -func (so sortedOutputs) Swap(i, j int) { - so.ids[i], so.ids[j] = so.ids[j], so.ids[i] - so.outputs[i], so.outputs[j] = so.outputs[j], so.outputs[i] -} diff --git a/modules/wallet/offline.go b/modules/wallet/offline.go deleted file mode 100644 index 9a85ed3..0000000 --- a/modules/wallet/offline.go +++ /dev/null @@ -1,521 +0,0 @@ -package wallet - -import ( - "bytes" - "errors" - "math" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// UnspentOutputs returns the unspent outputs tracked by the wallet. -func (w *Wallet) UnspentOutputs() ([]modules.UnspentOutput, error) { - if err := w.tg.Add(); err != nil { - return nil, err - } - defer w.tg.Done() - w.mu.Lock() - defer w.mu.Unlock() - - // Ensure durability of reported outputs. - if err := w.syncDB(); err != nil { - return nil, err - } - - // Build initial list of confirmed outputs. 
- var outputs []modules.UnspentOutput - dbForEachSiacoinOutput(w.dbTx, func(scoid types.SiacoinOutputID, sco types.SiacoinOutput) { - outputs = append(outputs, modules.UnspentOutput{ - FundType: types.SpecifierSiacoinOutput, - ID: types.Hash256(scoid), - UnlockHash: sco.Address, - Value: sco.Value, - }) - }) - dbForEachSiafundOutput(w.dbTx, func(sfoid types.SiafundOutputID, sfo types.SiafundOutput, _ types.Currency) { - outputs = append(outputs, modules.UnspentOutput{ - FundType: types.SpecifierSiafundOutput, - ID: types.Hash256(sfoid), - UnlockHash: sfo.Address, - Value: types.NewCurrency(sfo.Value, 0), - }) - }) - - // Don't include outputs marked as spent in pending transactions. - pending := make(map[types.Hash256]struct{}) - curr := w.unconfirmedProcessedTransactions.head - for curr != nil { - pt := curr.txn - for _, input := range pt.Inputs { - if input.WalletAddress { - pending[input.ParentID] = struct{}{} - } - } - curr = curr.next - } - filtered := outputs[:0] - for _, o := range outputs { - if _, ok := pending[o.ID]; !ok { - filtered = append(filtered, o) - } - } - outputs = filtered - - // Set the confirmation height for each output. -outer: - for i, o := range outputs { - txnIndices, err := dbGetAddrTransactions(w.dbTx, o.UnlockHash) - if err != nil { - return nil, err - } - for _, j := range txnIndices { - pt, err := dbGetProcessedTransaction(w.dbTx, j) - if err != nil { - return nil, err - } - for _, sco := range pt.Outputs { - if sco.ID == o.ID { - outputs[i].ConfirmationHeight = pt.ConfirmationHeight - continue outer - } - } - } - } - - // Add unconfirmed outputs, except those that are spent in pending - // transactions. 
- curr = w.unconfirmedProcessedTransactions.head - for curr != nil { - pt := curr.txn - for _, o := range pt.Outputs { - if _, ok := pending[o.ID]; !ok && o.WalletAddress { - outputs = append(outputs, modules.UnspentOutput{ - FundType: types.SpecifierSiacoinOutput, - ID: o.ID, - UnlockHash: o.RelatedAddress, - Value: o.Value, - ConfirmationHeight: math.MaxUint64, // Unconfirmed. - }) - } - } - curr = curr.next - } - - // Mark the watch-only outputs. - for i, o := range outputs { - _, ok := w.watchedAddrs[o.UnlockHash] - outputs[i].IsWatchOnly = ok - } - - return outputs, nil -} - -// UnlockConditions returns the UnlockConditions for the specified address, if -// they are known to the wallet. -func (w *Wallet) UnlockConditions(addr types.Address) (uc types.UnlockConditions, err error) { - if err := w.tg.Add(); err != nil { - return types.UnlockConditions{}, err - } - defer w.tg.Done() - w.mu.RLock() - defer w.mu.RUnlock() - if !w.unlocked { - return types.UnlockConditions{}, modules.ErrLockedWallet - } - if sk, ok := w.keys[addr]; ok { - uc = sk.UnlockConditions - } else { - // Not in memory; try database. - uc, err = dbGetUnlockConditions(w.dbTx, addr) - if err != nil { - return types.UnlockConditions{}, errors.New("no record of UnlockConditions for that UnlockHash") - } - } - // Make a copy of the public key slice; otherwise the caller can modify it. - uc.PublicKeys = append([]types.UnlockKey(nil), uc.PublicKeys...) - return uc, nil -} - -// AddUnlockConditions adds a set of UnlockConditions to the wallet database. -func (w *Wallet) AddUnlockConditions(uc types.UnlockConditions) error { - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - w.mu.RLock() - defer w.mu.RUnlock() - if !w.unlocked { - return modules.ErrLockedWallet - } - return dbPutUnlockConditions(w.dbTx, uc) -} - -// SignTransaction signs txn using secret keys known to the wallet. 
The -// transaction should be complete with the exception of the Signature fields -// of each TransactionSignature referenced by toSign. For convenience, if -// toSign is empty, SignTransaction signs everything that it can. -func (w *Wallet) SignTransaction(txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error { - // Helper function to add the required number of signatures for the SC input. - addSiacoinInputSignature := func(input types.SiacoinInput) { - pubKeys := make(map[uint64]struct{}) - for _, sig := range txn.Signatures { - if sig.ParentID == types.Hash256(input.ParentID) { - pubKeys[sig.PublicKeyIndex] = struct{}{} - } - } - for i := range input.UnlockConditions.PublicKeys { - if _, ok := pubKeys[uint64(i)]; !ok { - txn.Signatures = append(txn.Signatures, types.TransactionSignature{ - ParentID: types.Hash256(input.ParentID), - CoveredFields: cf, - PublicKeyIndex: uint64(i), - }) - } - } - } - - // Helper function to add the required number of signatures for the SF input. - addSiafundInputSignature := func(input types.SiafundInput) { - pubKeys := make(map[uint64]struct{}) - for _, sig := range txn.Signatures { - if sig.ParentID == types.Hash256(input.ParentID) { - pubKeys[sig.PublicKeyIndex] = struct{}{} - } - } - for i := range input.UnlockConditions.PublicKeys { - if _, ok := pubKeys[uint64(i)]; !ok { - txn.Signatures = append(txn.Signatures, types.TransactionSignature{ - ParentID: types.Hash256(input.ParentID), - CoveredFields: cf, - PublicKeyIndex: uint64(i), - }) - } - } - } - - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - - w.mu.Lock() - defer w.mu.Unlock() - if !w.unlocked { - return modules.ErrLockedWallet - } - consensusHeight, err := dbGetConsensusHeight(w.dbTx) - if err != nil { - return err - } - - // Add a signature for each input. 
- for _, input := range txn.SiacoinInputs { - addSiacoinInputSignature(input) - } - for _, input := range txn.SiafundInputs { - addSiafundInputSignature(input) - } - - // If toSign is empty, sign all inputs that we have keys for. - if len(toSign) == 0 { - for _, sci := range txn.SiacoinInputs { - if _, ok := w.keys[sci.UnlockConditions.UnlockHash()]; ok { - toSign = append(toSign, types.Hash256(sci.ParentID)) - } - } - for _, sfi := range txn.SiafundInputs { - if _, ok := w.keys[sfi.UnlockConditions.UnlockHash()]; ok { - toSign = append(toSign, types.Hash256(sfi.ParentID)) - } - } - } - return signTransaction(txn, w.keys, toSign, cf, consensusHeight) -} - -// SignTransaction signs txn using secret keys derived from seed. The -// transaction should be complete with the exception of the Signature fields -// of each TransactionSignature referenced by toSign, which must not be empty. -// -// SignTransaction must derive all of the keys from scratch, so it is -// appreciably slower than calling the Wallet.SignTransaction method. Only the -// first 1 million keys are derived. -func SignTransaction(txn *types.Transaction, seed modules.Seed, toSign []types.Hash256, height uint64) error { - if len(toSign) == 0 { - // Unlike the wallet method, we can't simply "sign all inputs we have - // keys for," because without generating all of the keys upfront, we - // don't know how many inputs we actually have keys for. - return errors.New("toSign cannot be empty") - } - // Generate keys in batches up to 1e6 before giving up. 
- keys := make(map[types.Address]spendableKey, 1e6) - var keyIndex uint64 - const keysPerBatch = 1000 - for len(keys) < 1e6 { - for _, sk := range generateKeys(seed, keyIndex, keyIndex + keysPerBatch) { - keys[sk.UnlockConditions.UnlockHash()] = sk - } - keyIndex += keysPerBatch - if err := signTransaction(txn, keys, toSign, modules.FullCoveredFields(), height); err == nil { - return nil - } - } - return signTransaction(txn, keys, toSign, modules.FullCoveredFields(), height) -} - -// signTransaction signs the specified inputs of txn using the specified keys. -// It returns an error if any of the specified inputs cannot be signed. -func signTransaction(txn *types.Transaction, keys map[types.Address]spendableKey, toSign []types.Hash256, cf types.CoveredFields, height uint64) error { - // Helper function to lookup unlock conditions in the txn associated with - // a transaction signature's ParentID. - findUnlockConditions := func(id types.Hash256) (types.UnlockConditions, bool) { - for _, sci := range txn.SiacoinInputs { - if types.Hash256(sci.ParentID) == id { - return sci.UnlockConditions, true - } - } - for _, sfi := range txn.SiafundInputs { - if types.Hash256(sfi.ParentID) == id { - return sfi.UnlockConditions, true - } - } - return types.UnlockConditions{}, false - } - // Helper function to lookup the secret key that can sign. - findSigningKey := func(uc types.UnlockConditions, pubkeyIndex uint64) (types.PrivateKey, bool) { - if pubkeyIndex >= uint64(len(uc.PublicKeys)) { - return types.PrivateKey{}, false - } - pk := uc.PublicKeys[pubkeyIndex] - sk, ok := keys[uc.UnlockHash()] - if !ok { - return types.PrivateKey{}, false - } - for _, key := range sk.SecretKeys { - pubKey := key.PublicKey() - if bytes.Equal(pk.Key, pubKey[:]) { - return key, true - } - } - return types.PrivateKey{}, false - } - - for _, id := range toSign { - // Find associated txn signature. 
- sigIndex := -1 - for i, sig := range txn.Signatures { - if sig.ParentID == id { - sigIndex = i - break - } - } - if sigIndex == -1 { - return errors.New("toSign references signatures not present in transaction") - } - - // Find associated input. - uc, ok := findUnlockConditions(id) - if !ok { - return errors.New("toSign references IDs not present in transaction") - } - - // Lookup the signing key. - sk, ok := findSigningKey(uc, txn.Signatures[sigIndex].PublicKeyIndex) - if !ok { - return errors.New("could not locate signing key for " + id.String()) - } - - // Add signature. - // - // NOTE: it's possible that the Signature field will already be filled - // out. Although we could save a bit of work by not signing it, in - // practice it's probably best to overwrite any existing signatures, - // since we know that ours will be valid. - txn.Signatures[sigIndex].CoveredFields = cf - sigHash := modules.SigHash(*txn, sigIndex, height) - encodedSig := sk.SignHash(sigHash) - txn.Signatures[sigIndex].Signature = encodedSig[:] - } - - return nil -} - -// AddWatchAddresses instructs the wallet to begin tracking a set of -// addresses, in addition to the addresses it was previously tracking. If none -// of the addresses have appeared in the blockchain, the unused flag may be -// set to true. Otherwise, the wallet must rescan the blockchain to search for -// transactions containing the addresses. -func (w *Wallet) AddWatchAddresses(addrs []types.Address, unused bool) error { - if err := w.tg.Add(); err != nil { - return modules.ErrWalletShutdown - } - defer w.tg.Done() - - err := func() error { - w.mu.Lock() - defer w.mu.Unlock() - if !w.unlocked { - return modules.ErrLockedWallet - } - - // Update in-memory map. - for _, addr := range addrs { - w.watchedAddrs[addr] = struct{}{} - } - - // Update database. 
- alladdrs := make([]types.Address, 0, len(w.watchedAddrs)) - for addr := range w.watchedAddrs { - alladdrs = append(alladdrs, addr) - } - if err := dbPutWatchedAddresses(w.dbTx, alladdrs); err != nil { - return err - } - - if !unused { - // Prepare to rescan. - _, err := w.dbTx.Exec("DELETE FROM wt_addr") - if err != nil { - return err - } - _, err = w.dbTx.Exec("DELETE FROM wt_txn") - if err != nil { - return err - } - w.unconfirmedProcessedTransactions = processedTransactionList{} - if err := dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning); err != nil { - return err - } - if err := dbPutConsensusHeight(w.dbTx, 0); err != nil { - return err - } - } - return w.syncDB() - }() - if err != nil { - return err - } - - if !unused { - // Rescan the blockchain. - w.cs.Unsubscribe(w) - w.tpool.Unsubscribe(w) - - done := make(chan struct{}) - go w.rescanMessage(done) - defer close(done) - if err := w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning, w.tg.StopChan()); err != nil { - return err - } - w.tpool.TransactionPoolSubscribe(w) - } - - return nil -} - -// RemoveWatchAddresses instructs the wallet to stop tracking a set of -// addresses and delete their associated transactions. If none of the -// addresses have appeared in the blockchain, the unused flag may be set to -// true. Otherwise, the wallet must rescan the blockchain to rebuild its -// transaction history. -func (w *Wallet) RemoveWatchAddresses(addrs []types.Address, unused bool) error { - if err := w.tg.Add(); err != nil { - return modules.ErrWalletShutdown - } - defer w.tg.Done() - - err := func() error { - w.mu.Lock() - defer w.mu.Unlock() - if !w.unlocked { - return modules.ErrLockedWallet - } - - // Update in-memory map. - for _, addr := range addrs { - delete(w.watchedAddrs, addr) - } - - // Update database. 
- alladdrs := make([]types.Address, 0, len(w.watchedAddrs)) - for addr := range w.watchedAddrs { - alladdrs = append(alladdrs, addr) - } - if err := dbPutWatchedAddresses(w.dbTx, alladdrs); err != nil { - return err - } - - if !unused { - // Outputs associated with the addresses may be present in the - // SiacoinOutputs bucket. Iterate through the bucket and remove - // any outputs that we are no longer watching. - var outputIDs []types.SiacoinOutputID - dbForEachSiacoinOutput(w.dbTx, func(scoid types.SiacoinOutputID, sco types.SiacoinOutput) { - if !w.isWalletAddress(sco.Address) { - outputIDs = append(outputIDs, scoid) - } - }) - for _, scoid := range outputIDs { - if err := dbDeleteSiacoinOutput(w.dbTx, scoid); err != nil { - return err - } - } - - // Prepare to rescan. - _, err := w.dbTx.Exec("DELETE FROM wt_addr") - if err != nil { - return err - } - _, err = w.dbTx.Exec("DELETE FROM wt_txn") - if err != nil { - return err - } - w.unconfirmedProcessedTransactions = processedTransactionList{} - if err := dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning); err != nil { - return err - } - if err := dbPutConsensusHeight(w.dbTx, 0); err != nil { - return err - } - } - return w.syncDB() - }() - if err != nil { - return err - } - - if !unused { - // Rescan the blockchain. - w.cs.Unsubscribe(w) - w.tpool.Unsubscribe(w) - - done := make(chan struct{}) - go w.rescanMessage(done) - defer close(done) - if err := w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning, w.tg.StopChan()); err != nil { - return err - } - w.tpool.TransactionPoolSubscribe(w) - } - - return nil -} - -// WatchAddresses returns the set of addresses that the wallet is currently -// watching. 
-func (w *Wallet) WatchAddresses() ([]types.Address, error) { - if err := w.tg.Add(); err != nil { - return nil, modules.ErrWalletShutdown - } - defer w.tg.Done() - - w.mu.RLock() - defer w.mu.RUnlock() - - addrs := make([]types.Address, 0, len(w.watchedAddrs)) - for addr := range w.watchedAddrs { - addrs = append(addrs, addr) - } - return addrs, nil -} diff --git a/modules/wallet/persist.go b/modules/wallet/persist.go deleted file mode 100644 index d55e084..0000000 --- a/modules/wallet/persist.go +++ /dev/null @@ -1,121 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - "path/filepath" - - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" - - "go.sia.tech/core/types" - - "lukechampine.com/frand" -) - -const ( - logFile = "wallet.log" -) - -// encryptedSpendableKey stores an encrypted spendable key on disk. -type encryptedSpendableKey struct { - Salt walletSalt - EncryptionVerification modules.WalletKey - SpendableKey []byte -} - -// EncodeTo implements types.EncoderTo. -func (uk *encryptedSpendableKey) EncodeTo(e *types.Encoder) { - e.Write(uk.Salt[:]) - e.WriteBytes(uk.EncryptionVerification) - e.WriteBytes(uk.SpendableKey) -} - -// DecodeFrom implements types.DecoderFrom. -func (uk *encryptedSpendableKey) DecodeFrom(d *types.Decoder) { - d.Read(uk.Salt[:]) - uk.EncryptionVerification = d.ReadBytes() - uk.SpendableKey = d.ReadBytes() -} - -// openDB loads the set database and populates it with the necessary data. -func (w *Wallet) openDB() (err error) { - // Initialize the database. - tx, err := w.db.Begin() - if err != nil { - return fmt.Errorf("unable to start transaction: %v", err) - } - - // If the wallet does not have a UID, create one. 
- if (dbGetWalletSalt(tx) == walletSalt{}) { - if err = dbReset(tx); err != nil { - tx.Rollback() - return fmt.Errorf("couldn't reset wallet: %v", err) - } - var uid walletSalt - frand.Read(uid[:]) - if err = dbPutWalletSalt(tx, uid); err != nil { - tx.Rollback() - return fmt.Errorf("couldn't save wallet UID: %v", err) - } - } - - // Check whether wallet is encrypted. - encrypted, err := dbGetEncryptedVerification(tx) - if err != nil { - tx.Rollback() - return fmt.Errorf("couldn't check wallet encryption: %v", err) - } - w.encrypted = encrypted != nil - - return tx.Commit() -} - -// initPersist loads all of the wallet's persistence into memory. -func (w *Wallet) initPersist(dir string) error { - // Start logging. - var err error - w.log, err = persist.NewFileLogger(filepath.Join(dir, logFile)) - if err != nil { - return err - } - w.tg.AfterStop(func() { - w.log.Close() - }) - - // Open the database. - err = w.openDB() - if err != nil { - return err - } - - // Begin the initial transaction. - w.dbTx, err = w.db.Begin() - if err != nil { - w.log.Critical("ERROR: failed to start database update:", err) - return err - } - - // Ensure that the final db transaction is committed when the wallet closes. - w.tg.AfterStop(func() { - w.mu.Lock() - defer w.mu.Unlock() - - var err error - if w.dbRollback { - // Rollback txn if necessry. - err = errors.New("database unable to sync - rollback requested") - err = modules.ComposeErrors(err, w.dbTx.Rollback()) - } else { - // Else commit the transaction. - err = w.dbTx.Commit() - } - if err != nil { - w.log.Severe("ERROR: failed to apply database update:", err) - } - }) - - // Spawn a goroutine to commit the db transaction at regular intervals. 
- go w.threadedDBUpdate() - return nil -} diff --git a/modules/wallet/processedtransactions.go b/modules/wallet/processedtransactions.go deleted file mode 100644 index 7c7e61f..0000000 --- a/modules/wallet/processedtransactions.go +++ /dev/null @@ -1,31 +0,0 @@ -package wallet - -import ( - "github.com/mike76-dev/sia-satellite/modules" -) - -// processedTransactionNode is a single node in the list that points -// to the next node. -type processedTransactionNode struct { - txn modules.ProcessedTransaction - next *processedTransactionNode -} - -// processedTransactionList is a singly unsorted linked list of -// modules.ProcessedTransaction elements. It is more performant than -// the previously used array. -type processedTransactionList struct { - head *processedTransactionNode - tail *processedTransactionNode -} - -// add adds a new modules.ProcessedTransaction to the list. -func (ptl *processedTransactionList) add(pt modules.ProcessedTransaction) { - node := &processedTransactionNode{txn: pt} - if ptl.head == nil { - ptl.head = node - } else { - ptl.tail.next = node - } - ptl.tail = node -} diff --git a/modules/wallet/scan.go b/modules/wallet/scan.go index 22fc7ea..6322059 100644 --- a/modules/wallet/scan.go +++ b/modules/wallet/scan.go @@ -4,12 +4,11 @@ import ( "fmt" "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/persist" - "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" ) -const scanMultiplier = 4 // How many more keys to generate after each scan iteration. +const scanMultiplier = 4 // how many more keys to generate after each scan iteration var errMaxKeys = fmt.Errorf("refused to generate more than %v keys from seed", maxScanKeys) @@ -32,15 +31,13 @@ type scannedOutput struct { // A seedScanner scans the blockchain for addresses that belong to a given // seed. type seedScanner struct { - dustThreshold types.Currency // Minimum value of outputs to be included. 
- keys map[types.Address]uint64 // Map address to seed index. - largestIndexSeen uint64 // Largest index that has appeared in the blockchain. - scannedHeight uint64 + dustThreshold types.Currency // minimum value of outputs to be included + keys map[types.Address]uint64 // map address to seed index + largestIndexSeen uint64 // largest index that has appeared in the blockchain + tip types.ChainIndex seed modules.Seed siacoinOutputs map[types.SiacoinOutputID]scannedOutput siafundOutputs map[types.SiafundOutputID]scannedOutput - - log *persist.Logger } func (s *seedScanner) numKeys() uint64 { @@ -50,83 +47,100 @@ func (s *seedScanner) numKeys() uint64 { // generateKeys generates n additional keys from the seedScanner's seed. func (s *seedScanner) generateKeys(n uint64) { initialProgress := s.numKeys() - for i, k := range generateKeys(s.seed, initialProgress, n) { - s.keys[k.UnlockConditions.UnlockHash()] = initialProgress + uint64(i) + for i, key := range generateKeys(s.seed, initialProgress, n) { + s.keys[types.StandardUnlockHash(key.PublicKey())] = initialProgress + uint64(i) } } -// ProcessConsensusChange scans the blockchain for information relevant to the -// seedScanner. -func (s *seedScanner) ProcessConsensusChange(cc modules.ConsensusChange) { - // Update outputs. - for _, diff := range cc.SiacoinOutputDiffs { - if diff.Direction == modules.DiffApply { - if index, exists := s.keys[diff.SiacoinOutput.Address]; exists && diff.SiacoinOutput.Value.Cmp(s.dustThreshold) > 0 { - s.siacoinOutputs[diff.ID] = scannedOutput{ - id: types.Hash256(diff.ID), - value: diff.SiacoinOutput.Value, - seedIndex: index, +// UpdateChainState applies and reverts the ChainManager updates. 
+func (s *seedScanner) UpdateChainState(reverted []chain.RevertUpdate, applied []chain.ApplyUpdate) error { + for _, cru := range reverted { + cru.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + index, exists := s.keys[sce.SiacoinOutput.Address] + if exists { + if !spent { + delete(s.siacoinOutputs, types.SiacoinOutputID(sce.ID)) + } else if sce.SiacoinOutput.Value.Cmp(s.dustThreshold) > 0 { + s.siacoinOutputs[types.SiacoinOutputID(sce.ID)] = scannedOutput{ + id: sce.ID, + value: sce.SiacoinOutput.Value, + seedIndex: index, + } } - } - } else if diff.Direction == modules.DiffRevert { - // NOTE: DiffRevert means the output was either spent or was in a - // block that was reverted. - if _, exists := s.keys[diff.SiacoinOutput.Address]; exists { - delete(s.siacoinOutputs, diff.ID) - } - } - } - for _, diff := range cc.SiafundOutputDiffs { - if diff.Direction == modules.DiffApply { - // Do not compare against dustThreshold here; we always want to - // sweep every Siafund found. - if index, exists := s.keys[diff.SiafundOutput.Address]; exists { - s.siafundOutputs[diff.ID] = scannedOutput{ - id: types.Hash256(diff.ID), - value: types.NewCurrency(diff.SiafundOutput.Value, 0), - seedIndex: index, + if index > s.largestIndexSeen { + s.largestIndexSeen = index } } - } else if diff.Direction == modules.DiffRevert { - // NOTE: DiffRevert means the output was either spent or was in a - // block that was reverted. 
- if _, exists := s.keys[diff.SiafundOutput.Address]; exists { - delete(s.siafundOutputs, diff.ID) + }) + + cru.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + index, exists := s.keys[sfe.SiafundOutput.Address] + if exists { + if !spent { + delete(s.siafundOutputs, types.SiafundOutputID(sfe.ID)) + } else { + s.siafundOutputs[types.SiafundOutputID(sfe.ID)] = scannedOutput{ + id: sfe.ID, + value: types.NewCurrency(sfe.SiafundOutput.Value, 0), + seedIndex: index, + } + } + if index > s.largestIndexSeen { + s.largestIndexSeen = index + } } - } + }) } - // Update s.largestIndexSeen. - for _, diff := range cc.SiacoinOutputDiffs { - index, exists := s.keys[diff.SiacoinOutput.Address] - if exists { - if index > s.largestIndexSeen { - s.largestIndexSeen = index + for _, cau := range applied { + cau.ForEachSiacoinElement(func(sce types.SiacoinElement, spent bool) { + index, exists := s.keys[sce.SiacoinOutput.Address] + if exists { + if spent { + delete(s.siacoinOutputs, types.SiacoinOutputID(sce.ID)) + } else if sce.SiacoinOutput.Value.Cmp(s.dustThreshold) > 0 { + s.siacoinOutputs[types.SiacoinOutputID(sce.ID)] = scannedOutput{ + id: sce.ID, + value: sce.SiacoinOutput.Value, + seedIndex: index, + } + } + if index > s.largestIndexSeen { + s.largestIndexSeen = index + } } - } - } - for _, diff := range cc.SiafundOutputDiffs { - index, exists := s.keys[diff.SiafundOutput.Address] - if exists { - if index > s.largestIndexSeen { - s.largestIndexSeen = index + }) + + cau.ForEachSiafundElement(func(sfe types.SiafundElement, spent bool) { + index, exists := s.keys[sfe.SiafundOutput.Address] + if exists { + if spent { + delete(s.siafundOutputs, types.SiafundOutputID(sfe.ID)) + } else { + s.siafundOutputs[types.SiafundOutputID(sfe.ID)] = scannedOutput{ + id: sfe.ID, + value: types.NewCurrency(sfe.SiafundOutput.Value, 0), + seedIndex: index, + } + } + if index > s.largestIndexSeen { + s.largestIndexSeen = index + } } - } + }) } - // Adjust the scanned height and 
print the scan progress. - s.scannedHeight = cc.BlockHeight - if !cc.Synced { - fmt.Printf("\rWallet: scanned to height %d...", s.scannedHeight) - } else { - fmt.Println("\nDone!") + if len(applied) > 0 { + s.tip = applied[len(applied)-1].State.Index } + + return nil } -// scan subscribes s to cs and scans the blockchain for addresses that belong -// to s's seed. If scan returns errMaxKeys, additional keys may need to be -// generated to find all the addresses. -func (s *seedScanner) scan(cs modules.ConsensusSet, cancel <-chan struct{}) error { +// scan subscribes scans the blockchain for addresses that belong to s's seed. +// If scan returns errMaxKeys, additional keys may need to be generated to +// find all the addresses. +func (s *seedScanner) scan(cm *chain.Manager, stopChan <-chan struct{}) error { // Generate a bunch of keys and scan the blockchain looking for them. If // none of the 'upper' half of the generated keys are found, we are done; // otherwise, generate more keys and try again (bounded by a sane @@ -134,24 +148,37 @@ func (s *seedScanner) scan(cs modules.ConsensusSet, cancel <-chan struct{}) erro // // NOTE: since scanning is very slow, we aim to only scan once, which // means generating many keys. + fmt.Println("Wallet: starting scan...") numKeys := numInitialKeys for s.numKeys() < maxScanKeys { s.generateKeys(numKeys) // Reset scan height between scans. 
- s.scannedHeight = 0 - if err := cs.ConsensusSetSubscribe(s, modules.ConsensusChangeBeginning, cancel); err != nil { - return err + s.tip = types.ChainIndex{} + for s.tip != cm.Tip() { + select { + case <-stopChan: + return nil + default: + } + crus, caus, err := cm.UpdatesSince(s.tip, 100) + if err != nil { + return modules.AddContext(err, "failed to subscribe to chain manager") + } + if err := s.UpdateChainState(crus, caus); err != nil { + return modules.AddContext(err, "failed to update chain state") + } + fmt.Printf("Wallet: scanned to height %d...\n", s.tip.Height) } - cs.Unsubscribe(s) - if s.largestIndexSeen < s.numKeys() / 2 { + + if s.largestIndexSeen < s.numKeys()/2 { return nil } // Increase number of keys generated each iteration, capping so that // we do not exceed maxScanKeys. numKeys *= scanMultiplier - if numKeys > maxScanKeys - s.numKeys() { + if numKeys > maxScanKeys-s.numKeys() { numKeys = maxScanKeys - s.numKeys() } } @@ -159,13 +186,12 @@ func (s *seedScanner) scan(cs modules.ConsensusSet, cancel <-chan struct{}) erro } // newSeedScanner returns a new seedScanner. 
-func newSeedScanner(seed modules.Seed, log *persist.Logger) *seedScanner { +func newSeedScanner(seed modules.Seed, dustThreshold types.Currency) *seedScanner { return &seedScanner{ seed: seed, + dustThreshold: dustThreshold, keys: make(map[types.Address]uint64, numInitialKeys), siacoinOutputs: make(map[types.SiacoinOutputID]scannedOutput), siafundOutputs: make(map[types.SiafundOutputID]scannedOutput), - - log: log, } } diff --git a/modules/wallet/seed.go b/modules/wallet/seed.go index 05b0856..f63d431 100644 --- a/modules/wallet/seed.go +++ b/modules/wallet/seed.go @@ -1,74 +1,43 @@ package wallet import ( - "database/sql" - "encoding/binary" - "errors" + "crypto/ed25519" "runtime" "sync" "github.com/mike76-dev/sia-satellite/modules" - "golang.org/x/crypto/blake2b" - "go.sia.tech/core/types" - + "go.uber.org/zap" "lukechampine.com/frand" ) -var ( - errKnownSeed = errors.New("seed is already known") -) +const ( + // lookaheadBuffer together with lookaheadRescanThreshold defines the constant part + // of the maxLookahead. + lookaheadBuffer = uint64(4000) -type ( - // walletSalt is a randomly generated salt put at the front of every - // persistence object. It is used to make sure that a different encryption - // key can be used for every persistence object. - walletSalt [32]byte - - // encryptedSeed stores an encrypted wallet seed on disk. - encryptedSeed struct { - UID walletSalt - EncryptionVerification []byte - Seed []byte - } + // lookaheadRescanThreshold is the number of keys in the lookahead that will be + // generated before a complete wallet rescan is initialized. + lookaheadRescanThreshold = uint64(1000) ) -// generateSpendableKey creates the keys and unlock conditions for seed at a -// given index. 
-func generateSpendableKey(seed modules.Seed, index uint64) spendableKey { - h := blake2b.Sum256(seed[:]) - buf := make([]byte, 32+8) - copy(buf[:32], h[:]) - binary.LittleEndian.PutUint64(buf[32:], index) - s := blake2b.Sum256(buf) - sk := types.NewPrivateKeyFromSeed(s[:]) - pk := sk.PublicKey() - return spendableKey{ - UnlockConditions: types.UnlockConditions{ - PublicKeys: []types.UnlockKey{{ - Algorithm: types.SpecifierEd25519, - Key: pk[:], - }}, - SignaturesRequired: 1, - }, - SecretKeys: []types.PrivateKey{types.PrivateKey(sk)}, - } +// maxLookahead returns the size of the lookahead for a given seed progress +// which usually is the current primarySeedProgress. +func maxLookahead(start uint64) uint64 { + return start + lookaheadRescanThreshold + lookaheadBuffer + start/10 } // generateKeys generates n keys from seed, starting from index start. -func generateKeys(seed modules.Seed, start, n uint64) []spendableKey { +func generateKeys(seed modules.Seed, start, n uint64) []types.PrivateKey { // Generate in parallel, one goroutine per core. - keys := make([]spendableKey, n) + keys := make([]types.PrivateKey, n) var wg sync.WaitGroup wg.Add(runtime.NumCPU()) for cpu := 0; cpu < runtime.NumCPU(); cpu++ { go func(offset uint64) { defer wg.Done() for i := offset; i < n; i += uint64(runtime.NumCPU()) { - // NOTE: don't bother trying to optimize generateSpendableKey; - // profiling shows that ed25519 key generation consumes far - // more CPU time than encoding or hashing. - keys[i] = generateSpendableKey(seed, start+i) + keys[i] = modules.KeyFromSeed(&seed, start+i) } }(uint64(cpu)) } @@ -76,40 +45,16 @@ func generateKeys(seed modules.Seed, start, n uint64) []spendableKey { return keys } -// createSeed creates and encrypts a seed. 
-func createSeed(masterKey modules.WalletKey, seed modules.Seed) encryptedSeed { - var es encryptedSeed - frand.Read(es.UID[:]) - sek := saltedEncryptionKey(masterKey, es.UID) - es.EncryptionVerification, _ = modules.Encrypt(sek, verificationPlaintext) - buf := make([]byte, 32) - copy(buf[:16], seed[:]) - binary.LittleEndian.PutUint64(buf[16:24], 0) - binary.LittleEndian.PutUint64(buf[24:], 0) - es.Seed, _ = modules.Encrypt(sek, buf) - frand.Read(buf[:]) - return es -} - -// decryptSeed decrypts a seed using the encryption key. -func decryptSeed(masterKey modules.WalletKey, es encryptedSeed) (seed modules.Seed, err error) { - // Verify that the provided master key is the correct key. - decryptionKey := saltedEncryptionKey(masterKey, es.UID) - err = verifyEncryption(decryptionKey, es.EncryptionVerification) - if err != nil { - return modules.Seed{}, err - } - - // Decrypt and return the seed. - plainSeed, err := modules.Decrypt(decryptionKey, es.Seed) - if err != nil { - return modules.Seed{}, modules.AddContext(err, "failed to decrypt seed") +func (w *Wallet) generate(index uint64) { + for index > uint64(len(w.keys)) { + key := modules.KeyFromSeed(&w.seed, uint64(len(w.keys))) + addr := types.StandardUnlockHash(key.PublicKey()) + w.keys[addr] = key + if err := w.insertAddress(addr); err != nil { + w.log.Error("couldn't insert address", zap.Error(err)) + return + } } - - copy(seed[:], plainSeed[:16]) - frand.Read(plainSeed[:]) - - return seed, nil } // regenerateLookahead creates future keys up to a maximum of maxKeys keys. 
@@ -118,26 +63,13 @@ func (w *Wallet) regenerateLookahead(start uint64) { maxKeys := maxLookahead(start) existingKeys := uint64(len(w.lookahead)) - for i, k := range generateKeys(w.primarySeed, start+existingKeys, maxKeys-existingKeys) { - w.lookahead[k.UnlockConditions.UnlockHash()] = start + existingKeys + uint64(i) + for i, key := range generateKeys(w.seed, start+existingKeys, maxKeys-existingKeys) { + w.lookahead[types.StandardUnlockHash(key.PublicKey())] = start + existingKeys + uint64(i) } } -// integrateSeed generates n spendableKeys from the seed and loads them into -// the wallet. -func (w *Wallet) integrateSeed(seed modules.Seed, n uint64) { - for _, sk := range generateKeys(seed, 0, n) { - w.keys[sk.UnlockConditions.UnlockHash()] = sk - } -} - -// nextPrimarySeedAddress fetches the next n addresses from the primary seed. -func (w *Wallet) nextPrimarySeedAddresses(tx *sql.Tx, n uint64) ([]types.UnlockConditions, error) { - // Check that the wallet has been unlocked. - if !w.unlocked { - return []types.UnlockConditions{}, modules.ErrLockedWallet - } - +// nextAddresses fetches the next n addresses from the primary seed. +func (w *Wallet) nextAddresses(n uint64) ([]types.UnlockConditions, error) { // Check how many unused addresses we have available. neededUnused := uint64(len(w.unusedKeys)) if neededUnused > n { @@ -151,25 +83,31 @@ func (w *Wallet) nextPrimarySeedAddresses(tx *sql.Tx, n uint64) ([]types.UnlockC var ucs []types.UnlockConditions if n > 0 { // Fetch and increment the seed progress. - progress, err := dbGetPrimarySeedProgress(tx) + progress, err := w.getSeedProgress() if err != nil { - return []types.UnlockConditions{}, err + return nil, err } - if err = dbPutPrimarySeedProgress(tx, progress+n); err != nil { - return []types.UnlockConditions{}, err + if err = w.putSeedProgress(progress + n); err != nil { + return nil, err } // Integrate the next keys into the wallet, and return the unlock // conditions. 
Also remove new keys from the future keys and update them - // according to new progress - spendableKeys := generateKeys(w.primarySeed, progress, n) - ucs = make([]types.UnlockConditions, 0, len(spendableKeys)) - for _, spendableKey := range spendableKeys { - w.keys[spendableKey.UnlockConditions.UnlockHash()] = spendableKey - delete(w.lookahead, spendableKey.UnlockConditions.UnlockHash()) - ucs = append(ucs, spendableKey.UnlockConditions) + // according to new progress. + keys := generateKeys(w.seed, progress, n) + ucs = make([]types.UnlockConditions, 0, len(keys)) + for _, key := range keys { + w.keys[types.StandardUnlockHash(key.PublicKey())] = key + if err := w.insertAddress(types.StandardUnlockHash(key.PublicKey())); err != nil { + return nil, err + } + delete(w.lookahead, types.StandardUnlockHash(key.PublicKey())) + ucs = append(ucs, types.StandardUnlockConditions(key.PublicKey())) } w.regenerateLookahead(progress + n) + if err := w.save(); err != nil { + return nil, err + } } // Add as many unused UCs as necessary. @@ -192,76 +130,28 @@ func (w *Wallet) nextPrimarySeedAddresses(tx *sql.Tx, n uint64) ([]types.UnlockC return ucs, nil } -// nextPrimarySeedAddress fetches the next address from the primary seed. -func (w *Wallet) nextPrimarySeedAddress(tx *sql.Tx) (types.UnlockConditions, error) { - ucs, err := w.nextPrimarySeedAddresses(tx, 1) +// nextAddress fetches the next address from the seed. +func (w *Wallet) nextAddress() (types.UnlockConditions, error) { + ucs, err := w.nextAddresses(1) if err != nil { return types.UnlockConditions{}, err } return ucs[0], nil } -// AllSeeds returns a list of all seeds known to and used by the wallet. 
-func (w *Wallet) AllSeeds() ([]modules.Seed, error) { - if err := w.tg.Add(); err != nil { - return nil, modules.ErrWalletShutdown - } - defer w.tg.Done() - - w.mu.Lock() - defer w.mu.Unlock() - if !w.unlocked { - return nil, modules.ErrLockedWallet - } - return append([]modules.Seed{w.primarySeed}, w.seeds...), nil -} - -// PrimarySeed returns the decrypted primary seed of the wallet, as well as -// the number of addresses that the seed can be safely used to generate. -func (w *Wallet) PrimarySeed() (modules.Seed, uint64, error) { - if err := w.tg.Add(); err != nil { - return modules.Seed{}, 0, modules.ErrWalletShutdown - } - defer w.tg.Done() - - w.mu.Lock() - defer w.mu.Unlock() - if !w.unlocked { - return modules.Seed{}, 0, modules.ErrLockedWallet - } - progress, err := dbGetPrimarySeedProgress(w.dbTx) - if err != nil { - return modules.Seed{}, 0, err - } - - // Addresses remaining is maxScanKeys-progress; generating more keys than - // that risks not being able to recover them when using SweepSeed or - // InitFromSeed. - remaining := maxScanKeys - progress - if progress > maxScanKeys { - remaining = 0 - } - return w.primarySeed, remaining, nil -} - // MarkAddressUnused marks the provided address as unused which causes it to be // handed out by a subsequent call to `NextAddresses` again. func (w *Wallet) MarkAddressUnused(addrs ...types.UnlockConditions) error { if err := w.tg.Add(); err != nil { - return modules.ErrWalletShutdown + return err } defer w.tg.Done() - w.managedMarkAddressUnused(addrs...) - return nil -} - -// managedMarkAddressUnused marks the provided address as unused which causes it -// to be handed out by a subsequent call to `NextAddresses` again. -func (w *Wallet) managedMarkAddressUnused(addrs ...types.UnlockConditions) { w.mu.Lock() defer w.mu.Unlock() w.markAddressUnused(addrs...) 
+ + return nil } // markAddressUnused marks the provided address as unused which causes it @@ -273,23 +163,22 @@ func (w *Wallet) markAddressUnused(addrs ...types.UnlockConditions) { } // NextAddresses returns n unlock hashes that are ready to receive Siacoins or -// Siafunds. The addresses are generated using the primary address seed. +// Siafunds. // // Warning: If this function is used to generate large numbers of addresses, // those addresses should be used. Otherwise the lookahead might not be able to // keep up and multiple wallets with the same seed might desync. func (w *Wallet) NextAddresses(n uint64) ([]types.UnlockConditions, error) { if err := w.tg.Add(); err != nil { - return nil, modules.ErrWalletShutdown + return nil, err } defer w.tg.Done() w.mu.Lock() defer w.mu.Unlock() - // Generate some keys and sync the db. - ucs, err := w.nextPrimarySeedAddresses(w.dbTx, n) - err = modules.ComposeErrors(err, w.syncDB()) + // Generate some keys. + ucs, err := w.nextAddresses(n) if err != nil { return nil, err } @@ -298,10 +187,10 @@ func (w *Wallet) NextAddresses(n uint64) ([]types.UnlockConditions, error) { } // NextAddress returns an unlock hash that is ready to receive Siacoins or -// Siafunds. The address is generated using the primary address seed. +// Siafunds. func (w *Wallet) NextAddress() (types.UnlockConditions, error) { if err := w.tg.Add(); err != nil { - return types.UnlockConditions{}, modules.ErrWalletShutdown + return types.UnlockConditions{}, err } defer w.tg.Done() @@ -312,322 +201,32 @@ func (w *Wallet) NextAddress() (types.UnlockConditions, error) { return ucs[0], nil } -// LoadSeed will track all of the addresses generated by the input seed, -// reclaiming any funds that were lost due to a deleted seed or lost encryption -// key. An error will be returned if the seed has already been integrated with -// the wallet. 
-func (w *Wallet) LoadSeed(masterKey modules.WalletKey, seed modules.Seed) error { - if err := w.tg.Add(); err != nil { - return modules.ErrWalletShutdown - } - defer w.tg.Done() - - if !w.cs.Synced() { - return errors.New("cannot load seed until blockchain is synced") - } - - if !w.scanLock.TryLock() { - return errScanInProgress - } - defer w.scanLock.Unlock() - - // Because the recovery seed does not have a UID, duplication must be - // prevented by comparing with the list of decrypted seeds. This can only - // occur while the wallet is unlocked. - w.mu.RLock() - if !w.unlocked { - w.mu.RUnlock() - return modules.ErrLockedWallet - } - for _, wSeed := range append([]modules.Seed{w.primarySeed}, w.seeds...) { - if seed == wSeed { - w.mu.RUnlock() - return errKnownSeed - } - } - w.mu.RUnlock() - - // Scan blockchain to determine how many keys to generate for the seed. - s := newSeedScanner(seed, w.log) - if err := s.scan(w.cs, w.tg.StopChan()); err != nil { - return err - } - - // Add 4% as a buffer because the seed may have addresses in the wild - // that have not appeared in the blockchain yet. - seedProgress := s.largestIndexSeen + 500 - seedProgress += seedProgress / 25 - w.log.Printf("INFO: found key index %v in blockchain. Setting auxiliary seed progress to %v", s.largestIndexSeen, seedProgress) - - err := func() error { - w.mu.Lock() - defer w.mu.Unlock() - - err := checkMasterKey(w.dbTx, masterKey) - if err != nil { - return err - } - - // Create an encrypted seed. - es := createSeed(masterKey, seed) - - // Add the encrypted seed. - current, err := dbGetAuxiliarySeeds(w.dbTx) - if err != nil { - return err - } - err = dbPutAuxiliarySeeds(w.dbTx, append(current, es)) - if err != nil { - return err - } - - // Load the seed's keys. - w.integrateSeed(seed, seedProgress) - w.seeds = append(w.seeds, seed) - - // Delete the set of processed transactions; they will be recreated - // when we rescan. 
- _, err = w.dbTx.Exec("DELETE FROM wt_addr") - if err != nil { - return err - } - _, err = w.dbTx.Exec("DELETE FROM wt_txn") - if err != nil { - return err - } - w.unconfirmedProcessedTransactions = processedTransactionList{} - - // Reset the consensus change ID and height in preparation for rescan. - err = dbPutConsensusChangeID(w.dbTx, modules.ConsensusChangeBeginning) - if err != nil { - return err - } - return dbPutConsensusHeight(w.dbTx, 0) - }() - if err != nil { - return err - } - - // Rescan the blockchain. - w.cs.Unsubscribe(w) - w.tpool.Unsubscribe(w) - - done := make(chan struct{}) - go w.rescanMessage(done) - defer close(done) - - err = w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning, w.tg.StopChan()) - if err != nil { - return err - } - w.tpool.TransactionPoolSubscribe(w) - return nil +// ownsAddress returns true if the provided address belongs to the wallet. +func (w *Wallet) ownsAddress(addr types.Address) bool { + _, exists := w.keys[addr] + return exists } -// SweepSeed scans the blockchain for outputs generated from seed and creates -// a transaction that transfers them to the wallet. Note that this incurs a -// transaction fee. It returns the total value of the outputs, minus the fee. -// If only Siafunds were found, the fee is deducted from the wallet. -func (w *Wallet) SweepSeed(seed modules.Seed) (coins types.Currency, funds uint64, err error) { - if err = w.tg.Add(); err != nil { - return types.Currency{}, 0, modules.ErrWalletShutdown - } - defer w.tg.Done() - - if !w.scanLock.TryLock() { - return types.Currency{}, 0, errScanInProgress - } - defer w.scanLock.Unlock() - - w.mu.RLock() - match := seed == w.primarySeed - w.mu.RUnlock() - if match { - return types.Currency{}, 0, errors.New("cannot sweep primary seed") - } - - if !w.cs.Synced() { - return types.Currency{}, 0, errors.New("cannot sweep until blockchain is synced") - } - - // Get an address to spend into. +// Addresses returns the addresses of the wallet. 
+func (w *Wallet) Addresses() (addrs []types.Address) { w.mu.Lock() - uc, err := w.nextPrimarySeedAddress(w.dbTx) - height, err2 := dbGetConsensusHeight(w.dbTx) - w.mu.Unlock() - if err != nil { - return types.Currency{}, 0, err - } - if err2 != nil { - return types.Currency{}, 0, err2 - } - defer func() { - if err != nil { - w.managedMarkAddressUnused(uc) - } - }() - - // Scan blockchain for outputs, filtering out 'dust' (outputs that cost - // more in fees than they are worth). - s := newSeedScanner(seed, w.log) - _, maxFee := w.tpool.FeeEstimation() - const outputSize = 350 // Approx. size in bytes of an output and accompanying signature. - const maxOutputs = 50 // Approx. number of outputs that a transaction can handle. - s.dustThreshold = maxFee.Mul64(outputSize) - if err = s.scan(w.cs, w.tg.StopChan()); err != nil { - return - } - - if len(s.siacoinOutputs) == 0 && len(s.siafundOutputs) == 0 { - // If we aren't sweeping any coins or funds, then just return an - // error; no reason to proceed. - return types.Currency{}, 0, errors.New("nothing to sweep") - } + defer w.mu.Unlock() - // Flatten map to slice. - var siacoinOutputs, siafundOutputs []scannedOutput - for _, sco := range s.siacoinOutputs { - siacoinOutputs = append(siacoinOutputs, sco) + for addr := range w.addrs { + addrs = append(addrs, addr) } - for _, sfo := range s.siafundOutputs { - siafundOutputs = append(siafundOutputs, sfo) - } - - for len(siacoinOutputs) > 0 || len(siafundOutputs) > 0 { - // Process up to maxOutputs siacoinOutputs. - txnSiacoinOutputs := make([]scannedOutput, maxOutputs) - n := copy(txnSiacoinOutputs, siacoinOutputs) - txnSiacoinOutputs = txnSiacoinOutputs[:n] - siacoinOutputs = siacoinOutputs[n:] - - // Process up to (maxOutputs-n) siafundOutputs. 
- txnSiafundOutputs := make([]scannedOutput, maxOutputs-n) - n = copy(txnSiafundOutputs, siafundOutputs) - txnSiafundOutputs = txnSiafundOutputs[:n] - siafundOutputs = siafundOutputs[n:] - - var txnCoins types.Currency - var txnFunds uint64 - - // Construct a transaction that spends the outputs. - var txn types.Transaction - var sweptCoins types.Currency // Total value of swept Siacoin outputs. - var sweptFunds uint64 // Total value of swept Siafund outputs. - for _, output := range txnSiacoinOutputs { - // Construct a Siacoin input that spends the output. - sk := generateSpendableKey(seed, output.seedIndex) - txn.SiacoinInputs = append(txn.SiacoinInputs, types.SiacoinInput{ - ParentID: types.SiacoinOutputID(output.id), - UnlockConditions: sk.UnlockConditions, - }) - sweptCoins = sweptCoins.Add(output.value) - } - for _, output := range txnSiafundOutputs { - // Construct a Siafund input that spends the output. - sk := generateSpendableKey(seed, output.seedIndex) - txn.SiafundInputs = append(txn.SiafundInputs, types.SiafundInput{ - ParentID: types.SiafundOutputID(output.id), - UnlockConditions: sk.UnlockConditions, - }) - sweptFunds = sweptFunds + output.value.Lo - } - - // Estimate the transaction size and fee. NOTE: this equation doesn't - // account for other fields in the transaction, but since we are - // multiplying by maxFee, lowballing is ok. - estTxnSize := (len(txnSiacoinOutputs) + len(txnSiafundOutputs)) * outputSize - estFee := maxFee.Mul64(uint64(estTxnSize)) - txn.MinerFees = []types.Currency{estFee} - - // Calculate total Siacoin payout. - if sweptCoins.Cmp(estFee) > 0 { - txnCoins = sweptCoins.Sub(estFee) - } - txnFunds = sweptFunds - - var parents []types.Transaction - switch { - case txnCoins.IsZero() && txnFunds == 0: - // If we aren't sweeping any coins or funds, then just return an - // error; no reason to proceed. 
- return types.Currency{}, 0, errors.New("transaction fee exceeds value of swept outputs") - - case !txnCoins.IsZero() && txnFunds == 0: - // If we're sweeping coins but not funds, add a Siacoin output for - // them. - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: txnCoins, - Address: uc.UnlockHash(), - }) - - case txnCoins.IsZero() && txnFunds != 0: - // If we're sweeping funds but not coins, add a Siafund output for - // them. This is tricky because we still need to pay for the - // transaction fee, but we can't simply subtract the fee from the - // output value like we can with swept coins. Instead, we need to fund - // the fee using the existing wallet balance. - txn.SiafundOutputs = append(txn.SiafundOutputs, types.SiafundOutput{ - Value: txnFunds, - Address: uc.UnlockHash(), - }) - parentTxn, _, err := w.FundTransaction(&txn, estFee) - if err != nil { - w.ReleaseInputs(append([]types.Transaction{parentTxn}, txn)) - return types.Currency{}, 0, modules.AddContext(err, "couldn't pay transaction fee on swept funds") - } - parents = append(parents, parentTxn) - case !txnCoins.IsZero() && txnFunds != 0: - // If we're sweeping both coins and funds, add a Siacoin output and a - // Siafund output. - txn.SiacoinOutputs = append(txn.SiacoinOutputs, types.SiacoinOutput{ - Value: txnCoins, - Address: uc.UnlockHash(), - }) - txn.SiafundOutputs = append(txn.SiafundOutputs, types.SiafundOutput{ - Value: txnFunds, - Address: uc.UnlockHash(), - }) - } - - // Add signatures for all coins and funds. 
- for _, output := range txnSiacoinOutputs { - sk := generateSpendableKey(seed, output.seedIndex) - addSignatures(&txn, modules.FullCoveredFields(), sk.UnlockConditions, types.Hash256(output.id), sk, height) - } - for _, sfo := range txnSiafundOutputs { - sk := generateSpendableKey(seed, sfo.seedIndex) - addSignatures(&txn, modules.FullCoveredFields(), sk.UnlockConditions, types.Hash256(sfo.id), sk, height) - } - // Usually, all the inputs will come from swept outputs. However, there is - // an edge case in which inputs will be added from the wallet. To cover - // this case, we iterate through the SiacoinInputs and add a signature for - // any input that belongs to the wallet. - w.mu.RLock() - for _, input := range txn.SiacoinInputs { - if key, ok := w.keys[input.UnlockConditions.UnlockHash()]; ok { - addSignatures(&txn, modules.FullCoveredFields(), input.UnlockConditions, types.Hash256(input.ParentID), key, height) - } - } - w.mu.RUnlock() - - // Append transaction to txnSet. - txnSet := append(parents, txn) - - // Submit the transactions. - err = w.tpool.AcceptTransactionSet(txnSet) - if err != nil { - w.ReleaseInputs(txnSet) - return types.ZeroCurrency, 0, err - } - - w.log.Println("INFO: creating a transaction set to sweep a seed, IDs:") - for _, txn := range txnSet { - w.log.Println("\t", txn.ID()) - } - - coins = coins.Add(txnCoins) - funds = funds + txnFunds - } return } + +// RenterSeed derives a seed to be used by the renter for accessing the +// file contracts. 
+func (w *Wallet) RenterSeed(email string) []byte { + w.mu.Lock() + defer w.mu.Unlock() + renterSeed := make([]byte, ed25519.SeedSize) + rs := types.HashBytes(append(w.seed[:], []byte(email)...)) + defer frand.Read(rs[:]) + copy(renterSeed, rs[:]) + return renterSeed +} diff --git a/modules/wallet/transaction.go b/modules/wallet/transaction.go new file mode 100644 index 0000000..0886527 --- /dev/null +++ b/modules/wallet/transaction.go @@ -0,0 +1,68 @@ +package wallet + +import ( + "go.sia.tech/core/consensus" + "go.sia.tech/core/types" +) + +// checkOutput is a helper function used to determine if an output is usable. +func (w *Wallet) checkOutput(sce types.SiacoinElement, dustThreshold types.Currency) error { + // Check that an output is not dust. + if sce.SiacoinOutput.Value.Cmp(dustThreshold) < 0 { + return errDustOutput + } + // Check that this output has not recently been spent by the wallet. + if spent := w.used[sce.ID]; spent { + return errSpentOutput + } + + return nil +} + +// StandardTransactionSignature is the most common form of TransactionSignature. +// It covers the entire transaction, references a sole public key, and has no +// timelock. +func StandardTransactionSignature(id types.Hash256) types.TransactionSignature { + return types.TransactionSignature{ + ParentID: id, + CoveredFields: types.CoveredFields{WholeTransaction: true}, + PublicKeyIndex: 0, + } +} + +// SignTransaction signs txn with the given key. The TransactionSignature object +// must already be present in txn at the given index. 
+func SignTransaction(cs consensus.State, txn *types.Transaction, sigIndex int, key types.PrivateKey) { + tsig := &txn.Signatures[sigIndex] + var sigHash types.Hash256 + if tsig.CoveredFields.WholeTransaction { + sigHash = cs.WholeSigHash(*txn, tsig.ParentID, tsig.PublicKeyIndex, tsig.Timelock, tsig.CoveredFields.Signatures) + } else { + sigHash = cs.PartialSigHash(*txn, tsig.CoveredFields) + } + sig := key.SignHash(sigHash) + tsig.Signature = sig[:] +} + +// MarkWalletInputs scans a transaction and infers which inputs belong to this +// wallet. This allows those inputs to be signed. +func (w *Wallet) MarkWalletInputs(txn types.Transaction) (toSign []types.Hash256) { + w.mu.Lock() + defer w.mu.Unlock() + + for _, sci := range txn.SiacoinInputs { + unlockHash := sci.UnlockConditions.UnlockHash() + if _, exists := w.keys[unlockHash]; exists { + toSign = append(toSign, types.Hash256(sci.ParentID)) + } + } + + for _, sfi := range txn.SiafundInputs { + unlockHash := sfi.UnlockConditions.UnlockHash() + if _, exists := w.keys[unlockHash]; exists { + toSign = append(toSign, types.Hash256(sfi.ParentID)) + } + } + + return +} diff --git a/modules/wallet/transactionbuilder.go b/modules/wallet/transactionbuilder.go deleted file mode 100644 index 65deb5d..0000000 --- a/modules/wallet/transactionbuilder.go +++ /dev/null @@ -1,337 +0,0 @@ -package wallet - -import ( - "bytes" - "database/sql" - "errors" - "sort" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -var ( - // errDustOutput indicates an output is not spendable because it is dust. - errDustOutput = errors.New("output is too small") - - // errOutputTimelock indicates an output's timelock is still active. - errOutputTimelock = errors.New("wallet consensus set height is lower than the output timelock") - - // errSpendHeightTooHigh indicates an output's spend height is greater than - // the allowed height. 
- errSpendHeightTooHigh = errors.New("output spend height exceeds the allowed height") -) - -// addSignatures will sign a transaction using a spendable key, with support -// for multisig spendable keys. Because of the restricted input, the function -// is compatible with both Siacoin inputs and Siafund inputs. -func addSignatures(txn *types.Transaction, cf types.CoveredFields, uc types.UnlockConditions, parentID types.Hash256, spendKey spendableKey, height uint64) (newSigIndices []int) { - // Try to find the matching secret key for each public key - some public - // keys may not have a match. Some secret keys may be used multiple times, - // which is why public keys are used as the outer loop. - totalSignatures := uint64(0) - for i, pk := range uc.PublicKeys { - // Search for the matching secret key to the public key. - for j := range spendKey.SecretKeys { - pubKey := spendKey.SecretKeys[j].PublicKey() - if !bytes.Equal(pk.Key, pubKey[:]) { - continue - } - - // Found the right secret key, add a signature. - sig := types.TransactionSignature{ - ParentID: parentID, - CoveredFields: cf, - PublicKeyIndex: uint64(i), - } - newSigIndices = append(newSigIndices, len(txn.Signatures)) - txn.Signatures = append(txn.Signatures, sig) - sigIndex := len(txn.Signatures) - 1 - sigHash := modules.SigHash(*txn, sigIndex, height) - encodedSig := spendKey.SecretKeys[j].SignHash(sigHash) - txn.Signatures[sigIndex].Signature = encodedSig[:] - - // Count that the signature has been added, and break out of the - // secret key loop. - totalSignatures++ - break - } - - // If there are enough signatures to satisfy the unlock conditions, - // break out of the outer loop. - if totalSignatures == uc.SignaturesRequired { - break - } - } - return newSigIndices -} - -// checkOutput is a helper function used to determine if an output is usable. 
-func (w *Wallet) checkOutput(tx *sql.Tx, currentHeight uint64, id types.SiacoinOutputID, output types.SiacoinOutput, dustThreshold types.Currency) error { - // Check that an output is not dust. - if output.Value.Cmp(dustThreshold) < 0 { - return errDustOutput - } - // Check that this output has not recently been spent by the wallet. - spendHeight, err := dbGetSpentOutput(tx, types.Hash256(id)) - if err == nil { - if spendHeight+RespendTimeout > currentHeight { - return errSpendHeightTooHigh - } - } - outputUnlockConditions := w.keys[output.Address].UnlockConditions - if currentHeight < outputUnlockConditions.Timelock { - return errOutputTimelock - } - - return nil -} - -// FundTransaction adds Siacoin inputs worth at least the requested amount to -// the provided transaction. A change output is also added, if necessary. The -// inputs will not be available to future calls to FundTransaction unless -// ReleaseInputs is called. -func (w *Wallet) FundTransaction(txn *types.Transaction, amount types.Currency) (parentTxn types.Transaction, toSign []types.Hash256, err error) { - if amount.IsZero() { - return - } - // dustThreshold has to be obtained separate from the lock. - dustThreshold, err := w.DustThreshold() - if err != nil { - return - } - - w.mu.Lock() - defer w.mu.Unlock() - - consensusHeight, err := dbGetConsensusHeight(w.dbTx) - if err != nil { - return - } - - // Collect a value-sorted set of Siacoin outputs. - var so sortedOutputs - err = dbForEachSiacoinOutput(w.dbTx, func(scoid types.SiacoinOutputID, sco types.SiacoinOutput) { - so.ids = append(so.ids, scoid) - so.outputs = append(so.outputs, sco) - }) - if err != nil { - return - } - - // Add all of the unconfirmed outputs as well. - curr := w.unconfirmedProcessedTransactions.head - for curr != nil { - upt := curr.txn - for i, sco := range upt.Transaction.SiacoinOutputs { - // Determine if the output belongs to the wallet. 
- _, exists := w.keys[sco.Address] - if !exists { - continue - } - so.ids = append(so.ids, upt.Transaction.SiacoinOutputID(i)) - so.outputs = append(so.outputs, sco) - } - curr = curr.next - } - sort.Sort(sort.Reverse(so)) - - // Create and fund a parent transaction that will add the correct amount of - // Siacoins to the transaction. - var fund types.Currency - // potentialFund tracks the balance of the wallet including outputs that - // have been spent in other unconfirmed transactions recently. This is to - // provide the user with a more useful error message in the event that they - // are overspending. - var potentialFund types.Currency - var spentScoids []types.SiacoinOutputID - for i := range so.ids { - scoid := so.ids[i] - sco := so.outputs[i] - // Check that the output can be spent. - if err := w.checkOutput(w.dbTx, consensusHeight, scoid, sco, dustThreshold); err != nil { - if modules.ContainsError(err, errSpendHeightTooHigh) { - potentialFund = potentialFund.Add(sco.Value) - } - continue - } - - // Add a Siacoin input for this output. - sci := types.SiacoinInput{ - ParentID: scoid, - UnlockConditions: w.keys[sco.Address].UnlockConditions, - } - parentTxn.SiacoinInputs = append(parentTxn.SiacoinInputs, sci) - spentScoids = append(spentScoids, scoid) - - // Add the output to the total fund. - fund = fund.Add(sco.Value) - potentialFund = potentialFund.Add(sco.Value) - if fund.Cmp(amount) >= 0 { - break - } - } - if potentialFund.Cmp(amount) >= 0 && fund.Cmp(amount) < 0 { - return types.Transaction{}, nil, modules.ErrIncompleteTransactions - } - if fund.Cmp(amount) < 0 { - return types.Transaction{}, nil, modules.ErrLowBalance - } - - // Create and add the output that will be used to fund the standard - // transaction. 
- parentUnlockConditions, err := w.nextPrimarySeedAddress(w.dbTx) - if err != nil { - return types.Transaction{}, nil, err - } - defer func() { - if err != nil { - w.managedMarkAddressUnused(parentUnlockConditions) - } - }() - - exactOutput := types.SiacoinOutput{ - Value: amount, - Address: parentUnlockConditions.UnlockHash(), - } - parentTxn.SiacoinOutputs = append(parentTxn.SiacoinOutputs, exactOutput) - - // Create a refund output if needed. - if !amount.Equals(fund) { - refundUnlockConditions, err := w.nextPrimarySeedAddress(w.dbTx) - if err != nil { - return types.Transaction{}, nil, err - } - defer func() { - if err != nil { - w.managedMarkAddressUnused(refundUnlockConditions) - } - }() - refundOutput := types.SiacoinOutput{ - Value: fund.Sub(amount), - Address: refundUnlockConditions.UnlockHash(), - } - parentTxn.SiacoinOutputs = append(parentTxn.SiacoinOutputs, refundOutput) - } - - // Sign all of the inputs to the transaction. - for _, sci := range parentTxn.SiacoinInputs { - addSignatures(&parentTxn, modules.FullCoveredFields(), sci.UnlockConditions, types.Hash256(sci.ParentID), w.keys[sci.UnlockConditions.UnlockHash()], consensusHeight) - } - - // Mark the parent output as spent. Must be done after the transaction is - // finished because otherwise the txid and output id will change. - err = dbPutSpentOutput(w.dbTx, types.Hash256(parentTxn.SiacoinOutputID(0)), consensusHeight) - if err != nil { - return types.Transaction{}, nil, err - } - - // Add the exact output. - newInput := types.SiacoinInput{ - ParentID: parentTxn.SiacoinOutputID(0), - UnlockConditions: parentUnlockConditions, - } - txn.SiacoinInputs = append(txn.SiacoinInputs, newInput) - toSign = append(toSign, types.Hash256(newInput.ParentID)) - - // Mark all outputs that were spent as spent. 
- for _, scoid := range spentScoids { - err = dbPutSpentOutput(w.dbTx, types.Hash256(scoid), consensusHeight) - if err != nil { - return types.Transaction{}, nil, err - } - } - - return -} - -// SweepTransaction creates a funded txn that sends the inputs of the -// transaction to the specified output if submitted to the blockchain. -func (w *Wallet) SweepTransaction(txn types.Transaction, output types.SiacoinOutput) (types.Transaction, []types.Transaction) { - newTxn := modules.CopyTransaction(txn) - newTxn.SiacoinOutputs = append(newTxn.SiacoinOutputs, output) - _, parents, exists := w.tpool.Transaction(txn.ID()) - if !exists { - w.log.Println("WARN: couldn't find transaction parents") - } - return newTxn, parents -} - -// ReleaseInputs is a helper function that releases the inputs of txn for use in -// other transactions. It should only be called on transactions that are invalid -// or will never be broadcast. -func (w *Wallet) ReleaseInputs(txnSet []types.Transaction) { - w.mu.Lock() - defer w.mu.Unlock() - - // Iterate through all transactions and restore all outputs to the list of - // available outputs. - for _, txn := range txnSet { - for _, sci := range txn.SiacoinInputs { - dbDeleteSpentOutput(w.dbTx, types.Hash256(sci.ParentID)) - } - } -} - -// MarkWalletInputs scans a transaction and infers which inputs belong to this -// wallet. This allows those inputs to be signed. -func (w *Wallet) MarkWalletInputs(txn types.Transaction) (toSign []types.Hash256) { - for _, sci := range txn.SiacoinInputs { - unlockHash := sci.UnlockConditions.UnlockHash() - if w.managedCanSpendUnlockHash(unlockHash) { - toSign = append(toSign, types.Hash256(sci.ParentID)) - } - } - - for _, sfi := range txn.SiafundInputs { - unlockHash := sfi.UnlockConditions.UnlockHash() - if w.managedCanSpendUnlockHash(unlockHash) { - toSign = append(toSign, types.Hash256(sfi.ParentID)) - } - } - - return -} - -// Sign will sign any inputs added by FundTransaction. 
-func (w *Wallet) Sign(txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error { - w.mu.Lock() - consensusHeight, err := dbGetConsensusHeight(w.dbTx) - w.mu.Unlock() - if err != nil { - return err - } - - // For each Siacoin input covered by toSign, provide a signature. - w.mu.RLock() - defer w.mu.RUnlock() - for _, id := range toSign { - index := -1 - for i, input := range txn.SiacoinInputs { - if id == types.Hash256(input.ParentID) { - index = i - break - } - } - if index == -1 { - return errors.New("toSign references an input not present in the transaction") - } - - input := txn.SiacoinInputs[index] - key, ok := w.keys[input.UnlockConditions.UnlockHash()] - if !ok { - return errors.New("cannot sign input") - } - - addSignatures(txn, cf, input.UnlockConditions, types.Hash256(input.ParentID), key, consensusHeight) - } - - return nil -} - -// DropTransactions is a helper function that releases the inputs of -// a transaction set. It should only be called on transactions that -// are invalid or will never be broadcast. -func (w *Wallet) DropTransactions(txnSet []types.Transaction) { -} diff --git a/modules/wallet/transactions.go b/modules/wallet/transactions.go deleted file mode 100644 index 8871715..0000000 --- a/modules/wallet/transactions.go +++ /dev/null @@ -1,279 +0,0 @@ -package wallet - -import ( - "errors" - - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -var ( - errOutOfBounds = errors.New("requesting transactions at unknown confirmation heights") -) - -// AddressTransactions returns all of the wallet transactions associated with a -// single unlock hash. -func (w *Wallet) AddressTransactions(uh types.Address) (pts []modules.ProcessedTransaction, err error) { - if err := w.tg.Add(); err != nil { - return []modules.ProcessedTransaction{}, err - } - defer w.tg.Done() - - // Ensure durability of reported transactions. 
- w.mu.Lock() - defer w.mu.Unlock() - if err = w.syncDB(); err != nil { - return - } - - txnIndices, _ := dbGetAddrTransactions(w.dbTx, uh) - for _, i := range txnIndices { - pt, err := dbGetProcessedTransaction(w.dbTx, i) - if err != nil { - continue - } - pts = append(pts, pt) - } - return pts, nil -} - -// AddressUnconfirmedTransactions returns all of the unconfirmed wallet transactions -// related to a specific address. -func (w *Wallet) AddressUnconfirmedTransactions(uh types.Address) (pts []modules.ProcessedTransaction, err error) { - if err := w.tg.Add(); err != nil { - return []modules.ProcessedTransaction{}, err - } - defer w.tg.Done() - - // Ensure durability of reported transactions. - w.mu.Lock() - defer w.mu.Unlock() - if err = w.syncDB(); err != nil { - return - } - - // Scan the full list of unconfirmed transactions to see if there are any - // related transactions. - curr := w.unconfirmedProcessedTransactions.head - for curr != nil { - pt := curr.txn - relevant := false - for _, input := range pt.Inputs { - if input.RelatedAddress == uh { - relevant = true - break - } - } - for _, output := range pt.Outputs { - if output.RelatedAddress == uh { - relevant = true - break - } - } - if relevant { - pts = append(pts, pt) - } - curr = curr.next - } - return pts, err -} - -// Transaction returns the transaction with the given id. 'False' is returned -// if the transaction does not exist. -func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTransaction, found bool, err error) { - if err := w.tg.Add(); err != nil { - return modules.ProcessedTransaction{}, false, err - } - defer w.tg.Done() - - // Ensure durability of reported transaction. - w.mu.Lock() - defer w.mu.Unlock() - if err = w.syncDB(); err != nil { - return - } - - // Get the txn for the given txid. 
- pt, err = dbGetProcessedTransaction(w.dbTx, txid) - if err != nil { - curr := w.unconfirmedProcessedTransactions.head - for curr != nil { - txn := curr.txn - if txn.TransactionID == txid { - return txn, true, nil - } - curr = curr.next - } - return modules.ProcessedTransaction{}, false, nil - } - - return pt, true, nil -} - -// Transactions returns all transactions relevant to the wallet that were -// confirmed in the range [startHeight, endHeight]. -func (w *Wallet) Transactions(startHeight, endHeight uint64) (pts []modules.ProcessedTransaction, err error) { - if err := w.tg.Add(); err != nil { - return nil, err - } - defer w.tg.Done() - - // There may be transactions which haven't been saved / committed yet. Sync - // the database to ensure that any information which gets reported to the - // user will be persisted through a restart. - w.mu.Lock() - defer w.mu.Unlock() - if err = w.syncDB(); err != nil { - return nil, err - } - - height, err := dbGetConsensusHeight(w.dbTx) - if err != nil { - return - } else if startHeight > height || startHeight > endHeight { - return nil, errOutOfBounds - } - - rows, err := w.dbTx.Query("SELECT bytes FROM wt_txn") - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var pt modules.ProcessedTransaction - var ptBytes []byte - if err = rows.Scan(&ptBytes); err != nil { - return nil, err - } - - if err = decodeProcessedTransaction(ptBytes, &pt); err != nil { - return nil, err - } - - if pt.ConfirmationHeight > endHeight { - break - } - - if pt.ConfirmationHeight >= startHeight { - pts = append(pts, pt) - } - } - - return -} - -// ComputeValuedTransactions creates ValuedTransaction from a set of -// ProcessedTransactions. -func ComputeValuedTransactions(pts []modules.ProcessedTransaction, blockHeight uint64) ([]modules.ValuedTransaction, error) { - // Loop over all transactions and map the id of each contract to the most - // recent revision within the set. 
- revisionMap := make(map[types.FileContractID]types.FileContractRevision) - for _, pt := range pts { - for _, rev := range pt.Transaction.FileContractRevisions { - revisionMap[rev.ParentID] = rev - } - } - - sts := make([]modules.ValuedTransaction, 0, len(pts)) - for _, pt := range pts { - // Determine the value of the transaction assuming that it's a regular - // transaction. - var outgoingSiacoins types.Currency - for _, input := range pt.Inputs { - if input.FundType == specifierSiacoinInput && input.WalletAddress { - outgoingSiacoins = outgoingSiacoins.Add(input.Value) - } - } - var incomingSiacoins types.Currency - for _, output := range pt.Outputs { - if output.FundType == specifierMinerPayout && output.WalletAddress { - incomingSiacoins = incomingSiacoins.Add(output.Value) - } - if output.FundType == types.SpecifierSiacoinOutput && output.WalletAddress { - incomingSiacoins = incomingSiacoins.Add(output.Value) - } - } - - // Create the txn assuming that it's a regular txn without contracts or - // revisions. - st := modules.ValuedTransaction{ - ProcessedTransaction: pt, - ConfirmedIncomingValue: incomingSiacoins, - ConfirmedOutgoingValue: outgoingSiacoins, - } - - // If the transaction doesn't contain contracts or revisions we are done. - if len(pt.Transaction.FileContracts) == 0 && len(pt.Transaction.FileContractRevisions) == 0 { - sts = append(sts, st) - continue - } - - // If there are contracts, then there can't be revisions in the - // transaction. - if len(pt.Transaction.FileContracts) > 0 { - // A contract doesn't generate incoming value for the wallet. - st.ConfirmedIncomingValue = types.ZeroCurrency - - // A contract with a revision doesn't have outgoing value since the - // outgoing value is determined by the latest revision. - _, hasRevision := revisionMap[pt.Transaction.FileContractID(0)] - if hasRevision { - st.ConfirmedOutgoingValue = types.ZeroCurrency - } - sts = append(sts, st) - continue - } - - // Else the contract contains a revision. 
- rev := pt.Transaction.FileContractRevisions[0] - latestRev, ok := revisionMap[rev.ParentID] - if !ok { - err := errors.New("no revision exists for the provided id which should never happen") - return nil, err - } - - // If the revision isn't the latest one, it has neither incoming nor - // outgoing value. - if rev.RevisionNumber != latestRev.RevisionNumber { - st.ConfirmedIncomingValue = types.ZeroCurrency - st.ConfirmedOutgoingValue = types.ZeroCurrency - sts = append(sts, st) - continue - } - - // It is the latest but if it hasn't reached maturiy height yet we - // don't count the incoming value. - if blockHeight <= rev.WindowEnd + modules.MaturityDelay { - st.ConfirmedIncomingValue = types.ZeroCurrency - sts = append(sts, st) - continue - } - - // Otherwise we leave the incoming and outgoing value fields the way - // they are. - sts = append(sts, st) - continue - } - - return sts, nil -} - -// UnconfirmedTransactions returns the set of unconfirmed transactions that are -// relevant to the wallet. 
-func (w *Wallet) UnconfirmedTransactions() ([]modules.ProcessedTransaction, error) { - if err := w.tg.Add(); err != nil { - return nil, err - } - defer w.tg.Done() - w.mu.RLock() - defer w.mu.RUnlock() - pts := []modules.ProcessedTransaction{} - curr := w.unconfirmedProcessedTransactions.head - for curr != nil { - pts = append(pts, curr.txn) - curr = curr.next - } - return pts, nil -} diff --git a/modules/wallet/update.go b/modules/wallet/update.go index ba8454b..4f5e6db 100644 --- a/modules/wallet/update.go +++ b/modules/wallet/update.go @@ -1,658 +1,288 @@ package wallet import ( - "database/sql" - "math" - "time" + "fmt" "github.com/mike76-dev/sia-satellite/modules" - "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" + "go.uber.org/zap" ) -type ( - spentSiacoinOutputSet map[types.SiacoinOutputID]types.SiacoinOutput - spentSiafundOutputSet map[types.SiafundOutputID]types.SiafundOutput -) - -// threadedResetSubscriptions unsubscribes the wallet from the consensus set and transaction pool -// and subscribes again. -func (w *Wallet) threadedResetSubscriptions() { - if !w.scanLock.TryLock() { - w.log.Println("ERROR: failed to lock wallet", errScanInProgress) - return - } - defer w.scanLock.Unlock() - - w.cs.Unsubscribe(w) - w.tpool.Unsubscribe(w) - - err := w.cs.ConsensusSetSubscribe(w, modules.ConsensusChangeBeginning, w.tg.StopChan()) - if err != nil { - w.log.Println("ERROR: failed to subscribe wallet to consensus", err) - return - } - w.tpool.TransactionPoolSubscribe(w) +type chainUpdate interface { + UpdateElementProof(*types.StateElement) + ForEachTreeNode(func(row, col uint64, h types.Hash256)) + ForEachSiacoinElement(func(types.SiacoinElement, bool)) + ForEachSiafundElement(func(types.SiafundElement, bool)) } -// advanceSeedLookahead generates all keys from the current primary seed progress up to index -// and adds them to the set of spendable keys. 
Therefore the new primary seed progress will -// be index+1 and new lookahead keys will be generated starting from index+1. -// Returns true if a blockchain rescan is required. -func (w *Wallet) advanceSeedLookahead(index uint64) (bool, error) { - progress, err := dbGetPrimarySeedProgress(w.dbTx) - if err != nil { - return false, err - } - newProgress := index + 1 - - // Add spendable keys and remove them from lookahead. - spendableKeys := generateKeys(w.primarySeed, progress, newProgress - progress) - for _, key := range spendableKeys { - w.keys[key.UnlockConditions.UnlockHash()] = key - delete(w.lookahead, key.UnlockConditions.UnlockHash()) - } - - // Update the primarySeedProgress. - dbPutPrimarySeedProgress(w.dbTx, newProgress) - - // Regenerate lookahead. - w.regenerateLookahead(newProgress) - - // If more than lookaheadRescanThreshold keys were generated - // also initialize a rescan just to be safe. - if uint64(len(spendableKeys)) > lookaheadRescanThreshold { - return true, nil +func (w *Wallet) applyEvents(events []Event) { + for _, event := range events { + w.log.Info("found", zap.String("new", event.String())) } - - return false, nil -} - -// isWalletAddress is a helper function that checks if an Address is -// derived from one of the wallet's spendable keys or is being explicitly watched. -func (w *Wallet) isWalletAddress(uh types.Address) bool { - _, spendable := w.keys[uh] - _, watchonly := w.watchedAddrs[uh] - return spendable || watchonly } -// updateLookahead uses a consensus change to update the seed progress if one of the outputs -// contains an unlock hash of the lookahead set. Returns true if a blockchain rescan is required. 
-func (w *Wallet) updateLookahead(tx *sql.Tx, cc modules.ConsensusChange) (bool, error) { - var largestIndex uint64 - for _, diff := range cc.SiacoinOutputDiffs { - if index, ok := w.lookahead[diff.SiacoinOutput.Address]; ok { - if index > largestIndex { - largestIndex = index - } - } - } - for _, diff := range cc.SiafundOutputDiffs { - if index, ok := w.lookahead[diff.SiafundOutput.Address]; ok { - if index > largestIndex { - largestIndex = index - } - } - } - if largestIndex > 0 { - return w.advanceSeedLookahead(largestIndex) +func (w *Wallet) revertEvents(events []Event) { + for _, event := range events { + w.log.Info("found", zap.String("reverted", event.String())) } - - return false, nil } -// updateConfirmedSet uses a consensus change to update the confirmed set of -// outputs as understood by the wallet. -func (w *Wallet) updateConfirmedSet(tx *sql.Tx, cc modules.ConsensusChange) error { - for _, diff := range cc.SiacoinOutputDiffs { - // Verify that the diff is relevant to the wallet. - if !w.isWalletAddress(diff.SiacoinOutput.Address) { - continue - } - - var err error - if diff.Direction == modules.DiffApply { - w.log.Println("INFO: wallet has gained a spendable Siacoin output:", diff.ID, "::", diff.SiacoinOutput.Value) - err = dbPutSiacoinOutput(tx, diff.ID, diff.SiacoinOutput) - } else { - w.log.Println("INFO: wallet has lost a spendable Siacoin output:", diff.ID, "::", diff.SiacoinOutput.Value) - err = dbDeleteSiacoinOutput(tx, diff.ID) - } - if err != nil { - w.log.Severe("ERROR: could not update Siacoin output:", err) - return err - } - } - for _, diff := range cc.SiafundOutputDiffs { - // Verify that the diff is relevant to the wallet. 
- if !w.isWalletAddress(diff.SiafundOutput.Address) { - continue - } - - var err error - if diff.Direction == modules.DiffApply { - w.log.Println("INFO: wallet has gained a spendable Siafund output:", diff.ID, "::", diff.SiafundOutput.Value) - err = dbPutSiafundOutput(tx, diff.ID, diff.SiafundOutput) - } else { - w.log.Println("INFO: wallet has lost a spendable Siafund output:", diff.ID, "::", diff.SiafundOutput.Value) - err = dbDeleteSiafundOutput(tx, diff.ID) - } - if err != nil { - w.log.Severe("ERROR: could not update Siafund output:", err) - return err - } - } - for _, diff := range cc.SiafundPoolDiffs { - var err error - if diff.Direction == modules.DiffApply { - err = dbPutSiafundPool(tx, diff.Adjusted) - } else { - err = dbPutSiafundPool(tx, diff.Previous) - } - if err != nil { - w.log.Severe("ERROR: could not update Siafund pool:", err) - return err +func (w *Wallet) addSiacoinElements(sces []types.SiacoinElement) error { + for _, sce := range sces { + if err := w.insertSiacoinElement(sce); err != nil { + return modules.AddContext(err, "failed to insert output") } + w.log.Debug("added UTXO", zap.Stringer("address", sce.SiacoinOutput.Address), zap.Stringer("value", sce.SiacoinOutput.Value)) } return nil } -// revertHistory reverts any transaction history that was destroyed by reverted -// blocks in the consensus change. -func (w *Wallet) revertHistory(tx *sql.Tx, reverted []types.Block) error { - for _, block := range reverted { - // Remove any transactions that have been reverted. - for i := len(block.Transactions) - 1; i >= 0; i-- { - // If the transaction is relevant to the wallet, it will be the - // most recent transaction in bucketProcessedTransactions. 
- txid := block.Transactions[i].ID() - pt, err := dbGetLastProcessedTransaction(tx) - if err != nil { - break - } - if txid == pt.TransactionID { - w.log.Println("INFO: a wallet transaction has been reverted due to a reorg:", txid) - if err := dbDeleteLastProcessedTransaction(tx); err != nil { - w.log.Severe("ERROR: could not revert transaction:", err) - return err - } - } +func (w *Wallet) removeSiacoinElements(sces []types.SiacoinElement) error { + for _, sce := range sces { + if err := w.deleteSiacoinElement(sce.SiacoinOutput.Address); err != nil { + return modules.AddContext(err, "failed to delete output") } - - // Remove the miner payout transaction if applicable. - for i, mp := range block.MinerPayouts { - // If the transaction is relevant to the wallet, it will be the - // most recent transaction in bucketProcessedTransactions. - pt, err := dbGetLastProcessedTransaction(tx) - if err != nil { - break - } - if types.TransactionID(block.ID()) == pt.TransactionID { - w.log.Println("INFO: miner payout has been reverted due to a reorg:", block.ID().MinerOutputID(i), "::", mp.Value) - if err := dbDeleteLastProcessedTransaction(tx); err != nil { - w.log.Severe("ERROR: could not revert transaction:", err) - return err - } - break // There will only ever be one miner transaction. - } + if err := w.removeSpentOutput(sce.ID); err != nil { + return modules.AddContext(err, "failed to remove spent output") } + w.log.Debug("removed UTXO", zap.Stringer("address", sce.SiacoinOutput.Address), zap.Stringer("value", sce.SiacoinOutput.Value)) } return nil } -// computeSpentSiacoinOutputSet scans a slice of Siacoin output diffs for spent -// outputs and collects them in a map of SiacoinOutputID -> SiacoinOutput. -func computeSpentSiacoinOutputSet(diffs []modules.SiacoinOutputDiff) spentSiacoinOutputSet { - outputs := make(spentSiacoinOutputSet) - for _, diff := range diffs { - if diff.Direction == modules.DiffRevert { - // DiffRevert means spent. 
- outputs[diff.ID] = diff.SiacoinOutput +func (w *Wallet) addSiafundElements(sfes []types.SiafundElement) error { + for _, sfe := range sfes { + if err := w.insertSiafundElement(sfe); err != nil { + return modules.AddContext(err, "failed to insert output") } + w.log.Debug("added UTXO", zap.Stringer("address", sfe.SiafundOutput.Address), zap.Uint64("value", sfe.SiafundOutput.Value)) } - return outputs + return nil } -// computeSpentSiafundOutputSet scans a slice of Siafund output diffs for spent -// outputs and collects them in a map of SiafundOutputID -> SiafundOutput. -func computeSpentSiafundOutputSet(diffs []modules.SiafundOutputDiff) spentSiafundOutputSet { - outputs := make(spentSiafundOutputSet) - for _, diff := range diffs { - if diff.Direction == modules.DiffRevert { - // DiffRevert means spent. - outputs[diff.ID] = diff.SiafundOutput +func (w *Wallet) removeSiafundElements(sfes []types.SiafundElement) error { + for _, sfe := range sfes { + if err := w.deleteSiafundElement(sfe.SiafundOutput.Address); err != nil { + return modules.AddContext(err, "failed to delete output") + } + if err := w.removeSpentOutput(sfe.ID); err != nil { + return modules.AddContext(err, "failed to remove spent output") } + w.log.Debug("removed UTXO", zap.Stringer("address", sfe.SiafundOutput.Address), zap.Uint64("value", sfe.SiafundOutput.Value)) } - return outputs + return nil } -// computeProcessedTransactionsFromBlock searches all the miner payouts and -// transactions in a block and computes a ProcessedTransaction slice containing -// all of the transactions processed for the given block. -func (w *Wallet) computeProcessedTransactionsFromBlock(tx *sql.Tx, block types.Block, spentSiacoinOutputs spentSiacoinOutputSet, spentSiafundOutputs spentSiafundOutputSet, consensusHeight uint64) []modules.ProcessedTransaction { - var pts []modules.ProcessedTransaction - - // Find ProcessedTransactions from miner payouts. 
- relevant := false - for _, mp := range block.MinerPayouts { - relevant = relevant || w.isWalletAddress(mp.Address) - } - if relevant { - w.log.Println("INFO: wallet has received new miner payouts:", block.ID()) - // Apply the miner payout transaction if applicable. - minerPT := modules.ProcessedTransaction{ - Transaction: types.Transaction{}, - TransactionID: types.TransactionID(block.ID()), - ConfirmationHeight: consensusHeight, - ConfirmationTimestamp: block.Timestamp, - } - for i, mp := range block.MinerPayouts { - w.log.Println("\tminer payout:", block.ID().MinerOutputID(i), "::", mp.Value) - minerPT.Outputs = append(minerPT.Outputs, modules.ProcessedOutput{ - ID: types.Hash256(block.ID().MinerOutputID(i)), - FundType: specifierMinerPayout, - MaturityHeight: consensusHeight + modules.MaturityDelay, - WalletAddress: w.isWalletAddress(mp.Address), - RelatedAddress: mp.Address, - Value: mp.Value, - }) - } - pts = append(pts, minerPT) +// applyChainUpdate atomically applies the chain update to the database. +func (w *Wallet) applyChainUpdate(cau chain.ApplyUpdate) error { + relevantAddress := func(addr types.Address) bool { + return w.ownsAddress(addr) } - // Find ProcessedTransactions from transactions. - for _, txn := range block.Transactions { - // Determine if transaction is relevant. - relevant := false - for _, sci := range txn.SiacoinInputs { - relevant = relevant || w.isWalletAddress(sci.UnlockConditions.UnlockHash()) - } - for _, sco := range txn.SiacoinOutputs { - relevant = relevant || w.isWalletAddress(sco.Address) + // Determine which Siacoin and Siafund elements are ephemeral. 
+ created := make(map[types.Hash256]bool) + ephemeral := make(map[types.Hash256]bool) + for _, txn := range cau.Block.Transactions { + for i := range txn.SiacoinOutputs { + created[types.Hash256(txn.SiacoinOutputID(i))] = true } - for _, sfi := range txn.SiafundInputs { - relevant = relevant || w.isWalletAddress(sfi.UnlockConditions.UnlockHash()) + for _, input := range txn.SiacoinInputs { + ephemeral[types.Hash256(input.ParentID)] = created[types.Hash256(input.ParentID)] } - for _, sfo := range txn.SiafundOutputs { - relevant = relevant || w.isWalletAddress(sfo.Address) + for i := range txn.SiafundOutputs { + created[types.Hash256(txn.SiafundOutputID(i))] = true } - for _, fc := range txn.FileContracts { - for _, o := range fc.ValidProofOutputs { - relevant = relevant || w.isWalletAddress(o.Address) - } - for _, o := range fc.MissedProofOutputs { - relevant = relevant || w.isWalletAddress(o.Address) - } + for _, input := range txn.SiafundInputs { + ephemeral[types.Hash256(input.ParentID)] = created[types.Hash256(input.ParentID)] } - for _, fcr := range txn.FileContractRevisions { - for _, o := range fcr.ValidProofOutputs { - relevant = relevant || w.isWalletAddress(o.Address) - } - for _, o := range fcr.MissedProofOutputs { - relevant = relevant || w.isWalletAddress(o.Address) - } + } + + // Add new Siacoin elements to the store. + var newSiacoinElements, spentSiacoinElements []types.SiacoinElement + cau.ForEachSiacoinElement(func(se types.SiacoinElement, spent bool) { + if ephemeral[se.ID] { + return } - // Only create a ProcessedTransaction if transaction is relevant. 
- if !relevant { - continue + if !relevantAddress(se.SiacoinOutput.Address) { + return } - w.log.Println("INFO: a transaction has been confirmed on the blockchain:", txn.ID()) - pt := modules.ProcessedTransaction{ - Transaction: txn, - TransactionID: txn.ID(), - ConfirmationHeight: consensusHeight, - ConfirmationTimestamp: block.Timestamp, + if spent { + spentSiacoinElements = append(spentSiacoinElements, se) + } else { + newSiacoinElements = append(newSiacoinElements, se) } + }) - for _, sci := range txn.SiacoinInputs { - pi := modules.ProcessedInput{ - ParentID: types.Hash256(sci.ParentID), - FundType: specifierSiacoinInput, - WalletAddress: w.isWalletAddress(sci.UnlockConditions.UnlockHash()), - RelatedAddress: sci.UnlockConditions.UnlockHash(), - Value: spentSiacoinOutputs[sci.ParentID].Value, - } - pt.Inputs = append(pt.Inputs, pi) - - // Log any wallet-relevant inputs. - if pi.WalletAddress { - w.log.Println("\tSiacoin Input:", pi.ParentID, "::", pi.Value) - } + if err := w.addSiacoinElements(newSiacoinElements); err != nil { + return modules.AddContext(err, "failed to add Siacoin elements") + } else if err := w.removeSiacoinElements(spentSiacoinElements); err != nil { + return modules.AddContext(err, "failed to remove Siacoin elements") + } + + // Add new Siafund elements to the store. + var newSiafundElements, spentSiafundElements []types.SiafundElement + cau.ForEachSiafundElement(func(se types.SiafundElement, spent bool) { + if ephemeral[se.ID] { + return } - for i, sco := range txn.SiacoinOutputs { - po := modules.ProcessedOutput{ - ID: types.Hash256(txn.SiacoinOutputID(i)), - FundType: types.SpecifierSiacoinOutput, - MaturityHeight: consensusHeight, - WalletAddress: w.isWalletAddress(sco.Address), - RelatedAddress: sco.Address, - Value: sco.Value, - } - pt.Outputs = append(pt.Outputs, po) - - // Log any wallet-relevant outputs. 
- if po.WalletAddress { - w.log.Println("\tSiacoin Output:", po.ID, "::", po.Value) - } + if !relevantAddress(se.SiafundOutput.Address) { + return } - for _, sfi := range txn.SiafundInputs { - pi := modules.ProcessedInput{ - ParentID: types.Hash256(sfi.ParentID), - FundType: specifierSiafundInput, - WalletAddress: w.isWalletAddress(sfi.UnlockConditions.UnlockHash()), - RelatedAddress: sfi.UnlockConditions.UnlockHash(), - Value: types.NewCurrency64(spentSiafundOutputs[sfi.ParentID].Value), - } - pt.Inputs = append(pt.Inputs, pi) - // Log any wallet-relevant inputs. - if pi.WalletAddress { - w.log.Println("\tSiafund Input:", pi.ParentID, "::", pi.Value) - } - - siafundPool, err := dbGetSiafundPool(w.dbTx) - if err != nil { - w.log.Severe("ERROR: could not get Siafund pool: ", err) - continue - } - - sfo := spentSiafundOutputs[sfi.ParentID] - po := modules.ProcessedOutput{ - ID: types.Hash256(sfi.ParentID), - FundType: specifierClaimOutput, - MaturityHeight: consensusHeight + modules.MaturityDelay, - WalletAddress: w.isWalletAddress(sfi.UnlockConditions.UnlockHash()), - RelatedAddress: sfi.ClaimAddress, - // TODO This code is incorrect, because it doesn't take sfo.ClaimStart - // into account. For now, we have no way to fetch sfo.ClaimStart, because - // it is absent in `core`. - Value: siafundPool.Mul64(sfo.Value), - } - pt.Outputs = append(pt.Outputs, po) - // Log any wallet-relevant outputs. - if po.WalletAddress { - w.log.Println("\tClaim Output:", po.ID, "::", po.Value) - } + if spent { + spentSiafundElements = append(spentSiafundElements, se) + } else { + newSiafundElements = append(newSiafundElements, se) } + }) + + if err := w.addSiafundElements(newSiafundElements); err != nil { + return modules.AddContext(err, "failed to add Siafund elements") + } else if err := w.removeSiafundElements(spentSiafundElements); err != nil { + return modules.AddContext(err, "failed to remove Siafund elements") + } + + // Apply new events. 
+ w.applyEvents(AppliedEvents(cau.State, cau.Block, cau, relevantAddress)) + + // Update proofs. + if err := w.updateSiacoinElementProofs(cau); err != nil { + return modules.AddContext(err, "failed to update Siacoin element proofs") + } else if err := w.updateSiafundElementProofs(cau); err != nil { + return modules.AddContext(err, "failed to update Siafund element proofs") + } + + if err := w.updateTip(cau.State.Index); err != nil { + return modules.AddContext(err, "failed to update last indexed tip") + } + return nil +} - for i, sfo := range txn.SiafundOutputs { - po := modules.ProcessedOutput{ - ID: types.Hash256(txn.SiafundOutputID(i)), - FundType: types.SpecifierSiafundOutput, - MaturityHeight: consensusHeight, - WalletAddress: w.isWalletAddress(sfo.Address), - RelatedAddress: sfo.Address, - Value: types.NewCurrency64(sfo.Value), - } - pt.Outputs = append(pt.Outputs, po) - // Log any wallet-relevant outputs. - if po.WalletAddress { - w.log.Println("\tSiafund Output:", po.ID, "::", po.Value) - } +// revertChainUpdate atomically reverts the chain update from the database. +func (w *Wallet) revertChainUpdate(cru chain.RevertUpdate) error { + relevantAddress := func(addr types.Address) bool { + return w.ownsAddress(addr) + } + + // Determine which Siacoin and Siafund elements are ephemeral. 
+ created := make(map[types.Hash256]bool) + ephemeral := make(map[types.Hash256]bool) + for _, txn := range cru.Block.Transactions { + for i := range txn.SiacoinOutputs { + created[types.Hash256(txn.SiacoinOutputID(i))] = true + } + for _, input := range txn.SiacoinInputs { + ephemeral[types.Hash256(input.ParentID)] = created[types.Hash256(input.ParentID)] } + for i := range txn.SiafundOutputs { + created[types.Hash256(txn.SiafundOutputID(i))] = true + } + for _, input := range txn.SiafundInputs { + ephemeral[types.Hash256(input.ParentID)] = created[types.Hash256(input.ParentID)] + } + } - for _, fee := range txn.MinerFees { - pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{ - FundType: specifierMinerFee, - MaturityHeight: consensusHeight + modules.MaturityDelay, - Value: fee, - }) + var removedSiacoinElements, addedSiacoinElements []types.SiacoinElement + cru.ForEachSiacoinElement(func(se types.SiacoinElement, spent bool) { + if ephemeral[se.ID] { + return } - for i, fc := range txn.FileContracts { - for j, o := range fc.ValidProofOutputs { - po := modules.ProcessedOutput{ - ID: types.Hash256(modules.StorageProofOutputID(txn.FileContractID(i), true, j)), - FundType: types.SpecifierSiacoinOutput, - MaturityHeight: fc.WindowEnd + modules.MaturityDelay, - WalletAddress: w.isWalletAddress(o.Address), - RelatedAddress: o.Address, - Value: o.Value, - } - pt.Outputs = append(pt.Outputs, po) - // Log any wallet-relevant outputs. - if po.WalletAddress { - w.log.Println("\tFile Contract Valid Output:", po.ID, "::", po.Value) - } - } - for j, o := range fc.MissedProofOutputs { - po := modules.ProcessedOutput{ - ID: types.Hash256(modules.StorageProofOutputID(txn.FileContractID(i), false, j)), - FundType: types.SpecifierSiacoinOutput, - MaturityHeight: fc.WindowEnd + modules.MaturityDelay, - WalletAddress: w.isWalletAddress(o.Address), - RelatedAddress: o.Address, - Value: o.Value, - } - pt.Outputs = append(pt.Outputs, po) - // Log any wallet-relevant outputs. 
- if po.WalletAddress { - w.log.Println("\tFile Contract Missed Output:", po.ID, "::", po.Value) - } - } + if !relevantAddress(se.SiacoinOutput.Address) { + return } - for _, fcr := range txn.FileContractRevisions { - for j, o := range fcr.ValidProofOutputs { - po := modules.ProcessedOutput{ - ID: types.Hash256(modules.StorageProofOutputID(fcr.ParentID, true, j)), - FundType: types.SpecifierSiacoinOutput, - MaturityHeight: fcr.WindowEnd + modules.MaturityDelay, - WalletAddress: w.isWalletAddress(o.Address), - RelatedAddress: o.Address, - Value: o.Value, - } - pt.Outputs = append(pt.Outputs, po) - // Log any wallet-relevant outputs. - if po.WalletAddress { - w.log.Println("\tFile Contract Revision Valid Output:", po.ID, "::", po.Value) - } - } - for j, o := range fcr.MissedProofOutputs { - po := modules.ProcessedOutput{ - ID: types.Hash256(modules.StorageProofOutputID(fcr.ParentID, false, j)), - FundType: types.SpecifierSiacoinOutput, - MaturityHeight: fcr.WindowEnd + modules.MaturityDelay, - WalletAddress: w.isWalletAddress(o.Address), - RelatedAddress: o.Address, - Value: o.Value, - } - pt.Outputs = append(pt.Outputs, po) - // Log any wallet-relevant outputs. - if po.WalletAddress { - w.log.Println("\tFile Contract Revision Missed Output:", po.ID, "::", po.Value) - } - } + if spent { + // Re-add any spent Siacoin elements. + addedSiacoinElements = append(addedSiacoinElements, se) + } else { + // Delete any created Siacoin elements. + removedSiacoinElements = append(removedSiacoinElements, se) } + }) - pts = append(pts, pt) + // Revert Siacoin element changes. + if err := w.addSiacoinElements(addedSiacoinElements); err != nil { + return modules.AddContext(err, "failed to add Siacoin elements") + } else if err := w.removeSiacoinElements(removedSiacoinElements); err != nil { + return modules.AddContext(err, "failed to remove Siacoin elements") } - return pts -} -// applyHistory applies any transaction history that the applied blocks -// introduced. 
-func (w *Wallet) applyHistory(tx *sql.Tx, cc modules.ConsensusChange) error { - spentSiacoinOutputs := computeSpentSiacoinOutputSet(cc.SiacoinOutputDiffs) - spentSiafundOutputs := computeSpentSiafundOutputSet(cc.SiafundOutputDiffs) - consensusHeight := cc.InitialHeight() - - for _, block := range cc.AppliedBlocks { - // Increment the consensus height. - if block.ID() != modules.GenesisID { - consensusHeight++ + var removedSiafundElements, addedSiafundElements []types.SiafundElement + cru.ForEachSiafundElement(func(se types.SiafundElement, spent bool) { + if ephemeral[se.ID] { + return } - pts := w.computeProcessedTransactionsFromBlock(tx, block, spentSiacoinOutputs, spentSiafundOutputs, consensusHeight) - for _, pt := range pts { - err := dbAppendProcessedTransaction(tx, pt) - if err != nil { - return modules.AddContext(err, "could not put processed transaction") - } + if !relevantAddress(se.SiafundOutput.Address) { + return } - } - return nil -} + if spent { + // Re-add any spent Siafund elements. + addedSiafundElements = append(addedSiafundElements, se) + } else { + // Delete any created Siafund elements. + removedSiafundElements = append(removedSiafundElements, se) + } + }) -// ProcessConsensusChange parses a consensus change to update the set of -// confirmed outputs known to the wallet. -func (w *Wallet) ProcessConsensusChange(cc modules.ConsensusChange) { - if err := w.tg.Add(); err != nil { - return + // Revert Siafund element changes. 
+ if err := w.addSiafundElements(addedSiafundElements); err != nil { + return modules.AddContext(err, "failed to add Siafund elements") + } else if err := w.removeSiafundElements(removedSiafundElements); err != nil { + return modules.AddContext(err, "failed to remove Siafund elements") } - defer w.tg.Done() - - w.mu.Lock() - defer w.mu.Unlock() - if needRescan, err := w.updateLookahead(w.dbTx, cc); err != nil { - w.log.Severe("ERROR: failed to update lookahead:", err) - w.dbRollback = true - } else if needRescan { - go w.threadedResetSubscriptions() - } - if err := w.updateConfirmedSet(w.dbTx, cc); err != nil { - w.log.Severe("ERROR: failed to update confirmed set:", err) - w.dbRollback = true - } - if err := w.revertHistory(w.dbTx, cc.RevertedBlocks); err != nil { - w.log.Severe("ERROR: failed to revert consensus change:", err) - w.dbRollback = true - } - if err := w.applyHistory(w.dbTx, cc); err != nil { - w.log.Severe("ERROR: failed to apply consensus change:", err) - w.dbRollback = true - } - if err := dbPutConsensusChangeID(w.dbTx, cc.ID); err != nil { - w.log.Severe("ERROR: failed to update consensus change ID:", err) - w.dbRollback = true - } - if err := dbPutConsensusHeight(w.dbTx, cc.BlockHeight); err != nil { - w.log.Severe("ERROR: failed to update consensus block height:", err) - w.dbRollback = true + // Update proofs. + if err := w.updateSiacoinElementProofs(cru); err != nil { + return modules.AddContext(err, "failed to update Siacoin element proofs") + } else if err := w.updateSiafundElementProofs(cru); err != nil { + return modules.AddContext(err, "failed to update Siafund element proofs") } - if cc.Synced { - go w.threadedDefragWallet() - } -} + // Revert events. + w.revertEvents(AppliedEvents(cru.State, cru.Block, cru, relevantAddress)) -// ReceiveUpdatedUnconfirmedTransactions updates the wallet's unconfirmed -// transaction set. 
-func (w *Wallet) ReceiveUpdatedUnconfirmedTransactions(diff *modules.TransactionPoolDiff) { - if err := w.tg.Add(); err != nil { - return + if err := w.updateTip(cru.State.Index); err != nil { + return modules.AddContext(err, "failed to update last indexed tip") } - defer w.tg.Done() + return nil +} +// UpdateChainState applies and reverts the ChainManager updates. +func (w *Wallet) UpdateChainState(reverted []chain.RevertUpdate, applied []chain.ApplyUpdate) error { w.mu.Lock() defer w.mu.Unlock() - // Do the pruning first. If there are any pruned transactions, we will need - // to re-allocate the whole processed transactions array. - droppedTransactions := make(map[types.TransactionID]struct{}) - for i := range diff.RevertedTransactions { - txids := w.unconfirmedSets[diff.RevertedTransactions[i]] - for i := range txids { - droppedTransactions[txids[i]] = struct{}{} + for _, cru := range reverted { + revertedIndex := types.ChainIndex{ + ID: cru.Block.ID(), + Height: cru.State.Index.Height + 1, + } + if err := w.revertChainUpdate(cru); err != nil { + return fmt.Errorf("failed to revert chain update %q: %w", revertedIndex, err) } - delete(w.unconfirmedSets, diff.RevertedTransactions[i]) } - // Skip the reallocation if we can, otherwise reallocate the - // unconfirmedProcessedTransactions to no longer have the dropped - // transactions. - if len(droppedTransactions) != 0 { - // Capacity can't be reduced, because we have no way of knowing if the - // dropped transactions are relevant to the wallet or not, and some will - // not be relevant to the wallet, meaning they don't have a counterpart - // in w.unconfirmedProcessedTransactions. - var newUPT processedTransactionList - curr := w.unconfirmedProcessedTransactions.head - for curr != nil { - _, exists := droppedTransactions[curr.txn.TransactionID] - if !exists { - // Transaction was not dropped, add it to the new unconfirmed - // transactions. 
- newUPT.add(curr.txn) - } - curr = curr.next + for _, cau := range applied { + if err := w.applyChainUpdate(cau); err != nil { + return fmt.Errorf("failed to apply chain update %q: %w", cau.State.Index, err) } - - // Set the unconfirmed processed transactions to the pruned set. - w.unconfirmedProcessedTransactions = newUPT } - // Scroll through all of the diffs and add any new transactions. - for _, unconfirmedTxnSet := range diff.AppliedTransactions { - // Mark all of the transactions that appeared in this set. - // - // TODO: Technically only necessary to mark the ones that are relevant - // to the wallet, but overhead should be low. - w.unconfirmedSets[unconfirmedTxnSet.ID] = unconfirmedTxnSet.IDs - - // Get the values for the spent outputs. - spentSiacoinOutputs := make(map[types.SiacoinOutputID]types.SiacoinOutput) - for _, scod := range unconfirmedTxnSet.Change.SiacoinOutputDiffs { - // Only need to grab the reverted ones, because only reverted ones - // have the possibility of having been spent. - if scod.Direction == modules.DiffRevert { - spentSiacoinOutputs[scod.ID] = scod.SiacoinOutput - } - } + if err := w.save(); err != nil { + w.log.Error("couldn't save wallet", zap.Error(err)) + return modules.AddContext(err, "couldn't commit changes") + } - // Add each transaction to our set of unconfirmed transactions. - for i, txn := range unconfirmedTxnSet.Transactions { - // Determine whether transaction is relevant to the wallet. - relevant := false - for _, sci := range txn.SiacoinInputs { - relevant = relevant || w.isWalletAddress(sci.UnlockConditions.UnlockHash()) - } - for _, sco := range txn.SiacoinOutputs { - relevant = relevant || w.isWalletAddress(sco.Address) - } - - // Only create a ProcessedTransaction if txn is relevant. 
- if !relevant { - continue - } - - pt := modules.ProcessedTransaction{ - Transaction: txn, - TransactionID: unconfirmedTxnSet.IDs[i], - ConfirmationHeight: math.MaxUint64, - ConfirmationTimestamp: time.Unix(math.MaxInt64, math.MaxInt64), - } - for _, sci := range txn.SiacoinInputs { - pt.Inputs = append(pt.Inputs, modules.ProcessedInput{ - ParentID: types.Hash256(sci.ParentID), - FundType: specifierSiacoinInput, - WalletAddress: w.isWalletAddress(sci.UnlockConditions.UnlockHash()), - RelatedAddress: sci.UnlockConditions.UnlockHash(), - Value: spentSiacoinOutputs[sci.ParentID].Value, - }) - } - for i, sco := range txn.SiacoinOutputs { - pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{ - ID: types.Hash256(txn.SiacoinOutputID(i)), - FundType: types.SpecifierSiacoinOutput, - MaturityHeight: math.MaxUint64, - WalletAddress: w.isWalletAddress(sco.Address), - RelatedAddress: sco.Address, - Value: sco.Value, - }) - } - for _, fee := range txn.MinerFees { - pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{ - FundType: specifierMinerFee, - Value: fee, - }) - } - w.unconfirmedProcessedTransactions.add(pt) - } + if w.synced() { + go w.threadedDefragWallet() } + + return nil } diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index bf5dcd9..4a457f5 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -3,155 +3,63 @@ package wallet import ( "bytes" "database/sql" - "errors" + "fmt" + "path/filepath" "sort" "sync" + "time" siasync "github.com/mike76-dev/sia-satellite/internal/sync" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/persist" - "go.sia.tech/core/types" + "go.sia.tech/coreutils/chain" + "go.uber.org/zap" ) -const ( - // RespendTimeout records the number of blocks that the wallet will wait - // before spending an output that has been spent in the past. 
If the - // transaction spending the output has not made it to the transaction pool - // after the limit, the assumption is that it never will. - RespendTimeout = 100 -) - -var ( - errNilDB = errors.New("wallet cannot initialize with a nil database") - errNilConsensusSet = errors.New("wallet cannot initialize with a nil consensus set") - errNilTpool = errors.New("wallet cannot initialize with a nil transaction pool") -) - -// spendableKey is a set of secret keys plus the corresponding unlock -// conditions. The public key can be derived from the secret key and then -// matched to the corresponding public keys in the unlock conditions. All -// addresses that are to be used in 'FundTransaction' must conform to this -// form of spendable key. -type spendableKey struct { - UnlockConditions types.UnlockConditions - SecretKeys []types.PrivateKey -} - -// EncodeTo implements types.EncoderTo. -func (sk *spendableKey) EncodeTo(e *types.Encoder) { - sk.UnlockConditions.EncodeTo(e) - e.WritePrefix(len(sk.SecretKeys)) - for _, key := range sk.SecretKeys { - e.WriteBytes(key[:]) +type ( + // Wallet manages funds and signs transactions. + Wallet struct { + cm *chain.Manager + s modules.Syncer + db *sql.DB + tx *sql.Tx + log *zap.Logger + + mu sync.Mutex + tg siasync.ThreadGroup + closeFn func() + + seed modules.Seed + addrs map[types.Address]uint64 + keys map[types.Address]types.PrivateKey + unusedKeys map[types.Address]types.UnlockConditions + lookahead map[types.Address]uint64 + watchedAddrs map[types.Address]uint64 + sces map[types.Address]types.SiacoinElement + sfes map[types.Address]types.SiafundElement + used map[types.Hash256]bool + tip types.ChainIndex + dbError bool } -} +) -// DecodeFrom implements types.DecoderFrom. 
-func (sk *spendableKey) DecodeFrom(d *types.Decoder) { - sk.UnlockConditions.DecodeFrom(d) - sk.SecretKeys = make([]types.PrivateKey, d.ReadPrefix()) - for i := 0; i < len(sk.SecretKeys); i++ { - sk.SecretKeys[i] = types.PrivateKey(d.ReadBytes()) +// Close shuts down the wallet. +func (w *Wallet) Close() error { + err := w.tg.Stop() + if err != nil { + w.log.Error("couldn't stop threads", zap.Error(err)) } + err = w.save() + w.closeFn() + return err } -// Wallet is an object that tracks balances, creates keys and addresses, -// manages building and sending transactions. -type Wallet struct { - // encrypted indicates whether the wallet has been encrypted (i.e. - // initialized). unlocked indicates whether the wallet is currently - // storing secret keys in memory. subscribed indicates whether the wallet - // has subscribed to the consensus set yet - the wallet is unable to - // subscribe to the consensus set until it has been unlocked for the first - // time. The primary seed is used to generate new addresses for the - // wallet. - encrypted bool - unlocked bool - primarySeed modules.Seed - - // Fields that handle the subscriptions to the cs and tpool. subscribedMu - // needs to be locked when subscribed is accessed and while calling the - // subscribing methods on the tpool and consensusset. - subscribedMu sync.Mutex - subscribed bool - - // The wallet's dependencies. - db *sql.DB - cs modules.ConsensusSet - tpool modules.TransactionPool - - // The following set of fields are responsible for tracking the confirmed - // outputs, and for being able to spend them. The seeds are used to derive - // the keys that are tracked on the blockchain. All keys are pregenerated - // from the seeds, when checking new outputs or spending outputs, the seeds - // are not referenced at all. The seeds are only stored so that the user - // may access them. 
- seeds []modules.Seed - unusedKeys map[types.Address]types.UnlockConditions - keys map[types.Address]spendableKey - lookahead map[types.Address]uint64 - watchedAddrs map[types.Address]struct{} - - // unconfirmedProcessedTransactions tracks unconfirmed transactions. - unconfirmedSets map[modules.TransactionSetID][]types.TransactionID - unconfirmedProcessedTransactions processedTransactionList - - // The wallet's database tracks its seeds, keys, outputs, and - // transactions. A global db transaction is maintained in memory to avoid - // excessive disk writes. Any operations involving dbTx must hold an - // exclusive lock. - // - // If dbRollback is set, then when the database syncs it will perform a - // rollback instead of a commit. For safety reasons, the db will close and - // the wallet will close if a rollback is performed. - // Syncing flag indicates if the database syncing is currently going on. - dbRollback bool - syncing bool - dbTx *sql.Tx - - log *persist.Logger - mu sync.RWMutex - - // A separate TryMutex is used to protect against concurrent unlocking or - // initialization. - scanLock siasync.TryMutex - - // The wallet's ThreadGroup tells tracked functions to shut down and - // blocks until they have all exited before returning from Close. - tg siasync.ThreadGroup - - // defragDisabled determines if the wallet is set to defrag outputs once it - // reaches a certain threshold - defragDisabled bool -} - -// Height return the internal processed consensus height of the wallet. -func (w *Wallet) Height() (uint64, error) { - if err := w.tg.Add(); err != nil { - return 0, modules.ErrWalletShutdown - } - defer w.tg.Done() - +// Tip returns the current tip of the wallet. 
+func (w *Wallet) Tip() types.ChainIndex { w.mu.Lock() defer w.mu.Unlock() - err := w.syncDB() - if err != nil { - return 0, err - } - - var height uint64 - tx, err := w.db.Begin() - if err != nil { - return 0, err - } - err = tx.QueryRow("SELECT height FROM wt_info WHERE id = 1").Scan(&height) - tx.Commit() - if err != nil { - return 0, err - } - - return height, nil + return w.tip } // LastAddresses returns the last n addresses starting at the last seedProgress @@ -160,7 +68,7 @@ func (w *Wallet) Height() (uint64, error) { // be retrieved in reverse order by simply supplying math.MaxUint64 for n. func (w *Wallet) LastAddresses(n uint64) ([]types.Address, error) { if err := w.tg.Add(); err != nil { - return nil, modules.ErrWalletShutdown + return nil, err } defer w.tg.Done() @@ -168,15 +76,14 @@ func (w *Wallet) LastAddresses(n uint64) ([]types.Address, error) { defer w.mu.Unlock() // Get the current seed progress from disk. - var seedProgress uint64 tx, err := w.db.Begin() if err != nil { - return []types.Address{}, err + return nil, err } - err = tx.QueryRow("SELECT progress FROM wt_info WHERE id = 1").Scan(&seedProgress) + seedProgress, err := w.getSeedProgress() tx.Commit() if err != nil { - return []types.Address{}, err + return nil, err } // At most seedProgess addresses can be requested. @@ -186,75 +93,25 @@ func (w *Wallet) LastAddresses(n uint64) ([]types.Address, error) { start := seedProgress - n // Generate the keys. - keys := generateKeys(w.primarySeed, start, n) - uhs := make([]types.Address, 0, len(keys)) + keys := generateKeys(w.seed, start, n) + addrs := make([]types.Address, 0, len(keys)) for i := len(keys) - 1; i >= 0; i-- { - uhs = append(uhs, keys[i].UnlockConditions.UnlockHash()) - } - - return uhs, nil -} - -// New creates a new wallet. Keys and addresses are not loaded into the -// wallet during the call to 'New', but rather during the call to 'Unlock'. 
-func New(db *sql.DB, cs modules.ConsensusSet, tpool modules.TransactionPool, dir string) (*Wallet, error) { - // Check for nil dependencies. - if db == nil { - return nil, errNilDB - } - if cs == nil { - return nil, errNilConsensusSet - } - if tpool == nil { - return nil, errNilTpool - } - - // Initialize the data structure. - w := &Wallet{ - db: db, - cs: cs, - tpool: tpool, - - keys: make(map[types.Address]spendableKey), - lookahead: make(map[types.Address]uint64), - unusedKeys: make(map[types.Address]types.UnlockConditions), - watchedAddrs: make(map[types.Address]struct{}), - - unconfirmedSets: make(map[modules.TransactionSetID][]types.TransactionID), - } - err := w.initPersist(dir) - if err != nil { - return nil, err + addrs = append(addrs, types.StandardUnlockHash(keys[i].PublicKey())) } - return w, nil -} -// Close terminates all ongoing processes involving the wallet, enabling -// garbage collection. -func (w *Wallet) Close() error { - w.cs.Unsubscribe(w) - w.tpool.Unsubscribe(w) - var lockErr error - // Lock the wallet outside of mu.Lock because Lock uses its own mu.Lock. - // Once the wallet is locked it cannot be unlocked except using the - // unexported unlock method (w.Unlock returns an error if the wallet's - // ThreadGroup is stopped). - if w.managedUnlocked() { - lockErr = w.managedLock() - } - return modules.ComposeErrors(lockErr, w.tg.Stop()) + return addrs, nil } -// AllAddresses returns all addresses that the wallet is able to spend from, -// including unseeded addresses. Addresses are returned sorted in byte-order. +// AllAddresses returns all addresses that the wallet is able to spend from. +// Addresses are returned sorted in byte-order. 
func (w *Wallet) AllAddresses() ([]types.Address, error) { if err := w.tg.Add(); err != nil { - return []types.Address{}, modules.ErrWalletShutdown + return nil, err } defer w.tg.Done() - w.mu.RLock() - defer w.mu.RUnlock() + w.mu.Lock() + defer w.mu.Unlock() addrs := make([]types.Address, 0, len(w.keys)) for addr := range w.keys { @@ -266,51 +123,215 @@ func (w *Wallet) AllAddresses() ([]types.Address, error) { return addrs, nil } -// Rescanning reports whether the wallet is currently rescanning the -// blockchain. -func (w *Wallet) Rescanning() (bool, error) { - if err := w.tg.Add(); err != nil { - return false, modules.ErrWalletShutdown - } - defer w.tg.Done() +// UnspentSiacoinOutputs returns the unspent SC outputs of the wallet. +func (w *Wallet) UnspentSiacoinOutputs() (sces []types.SiacoinElement) { + w.mu.Lock() + defer w.mu.Unlock() - rescanning := !w.scanLock.TryLock() - if !rescanning { - w.scanLock.Unlock() + for _, sce := range w.sces { + sces = append(sces, sce) } - return rescanning, nil + + return } -// Settings returns the wallet's current settings. -func (w *Wallet) Settings() (modules.WalletSettings, error) { - if err := w.tg.Add(); err != nil { - return modules.WalletSettings{}, modules.ErrWalletShutdown +// UnspentSiafundOutputs returns the unspent SF outputs of the wallet. +func (w *Wallet) UnspentSiafundOutputs() (sfes []types.SiafundElement) { + w.mu.Lock() + defer w.mu.Unlock() + + for _, sfe := range w.sfes { + sfes = append(sfes, sfe) } - defer w.tg.Done() - return modules.WalletSettings{ - NoDefrag: w.defragDisabled, - }, nil + + return } -// SetSettings will update the settings for the wallet. -func (w *Wallet) SetSettings(s modules.WalletSettings) error { - if err := w.tg.Add(); err != nil { - return modules.ErrWalletShutdown +// Annotate annotates the given transactions with the wallet. 
+func (w *Wallet) Annotate(pool []types.Transaction) []modules.PoolTransaction { + w.mu.Lock() + defer w.mu.Unlock() + + var annotated []modules.PoolTransaction + for _, txn := range pool { + ptxn := Annotate(txn, func(a types.Address) bool { + _, ok := w.addrs[a] + return ok + }) + if ptxn.Type != "unrelated" { + annotated = append(annotated, ptxn) + } } - defer w.tg.Done() - w.mu.Lock() - w.defragDisabled = s.NoDefrag - w.mu.Unlock() + return annotated +} + +func (w *Wallet) sync(index types.ChainIndex) error { + for index != w.cm.Tip() { + select { + case <-w.tg.StopChan(): + return nil + default: + } + crus, caus, err := w.cm.UpdatesSince(index, 100) + if err != nil { + w.log.Error("failed to subscribe to chain manager", zap.Error(err)) + return err + } else if err := w.UpdateChainState(crus, caus); err != nil { + w.log.Error("failed to update chain state", zap.Error(err)) + return err + } + if len(caus) > 0 { + index = caus[len(caus)-1].State.Index + } + } return nil } -// managedCanSpendUnlockHash returns true if and only if the the wallet -// has keys to spend from outputs with the given address. -func (w *Wallet) managedCanSpendUnlockHash(unlockHash types.Address) bool { - w.mu.RLock() - defer w.mu.RUnlock() +func (w *Wallet) subscribe() { + if err := w.sync(w.tip); err != nil { + return + } + + reorgChan := make(chan types.ChainIndex, 1) + unsubscribe := w.cm.OnReorg(func(index types.ChainIndex) { + select { + case reorgChan <- index: + default: + } + }) + defer unsubscribe() + + for { + select { + case <-w.tg.StopChan(): + return + case <-reorgChan: + } + + if err := w.sync(w.tip); err != nil { + w.log.Error("failed to sync wallet", zap.Error(err)) + } + } +} + +// New creates a new wallet. 
+func New(db *sql.DB, cm *chain.Manager, s modules.Syncer, seed, dir string) (*Wallet, error) { + var entropy modules.Seed + if err := modules.SeedFromPhrase(&entropy, seed); err != nil { + return nil, modules.AddContext(err, "unable to decode seed phrase") + } + + logger, closeFn, err := persist.NewFileLogger(filepath.Join(dir, "wallet.log")) + if err != nil { + return nil, modules.AddContext(err, "unable to create logger") + } + + w := &Wallet{ + cm: cm, + s: s, + db: db, + log: logger, + closeFn: closeFn, + used: make(map[types.Hash256]bool), + addrs: make(map[types.Address]uint64), + keys: make(map[types.Address]types.PrivateKey), + lookahead: make(map[types.Address]uint64), + unusedKeys: make(map[types.Address]types.UnlockConditions), + watchedAddrs: make(map[types.Address]uint64), + sces: make(map[types.Address]types.SiacoinElement), + sfes: make(map[types.Address]types.SiafundElement), + } + + if err := w.load(); err != nil { + return nil, modules.AddContext(err, "unable to load wallet") + } + + go w.threadedSaveWallet() + + if entropy != w.seed { + w.log.Info("new seed detected, rescanning") + w.tip = types.ChainIndex{} + w.addrs = make(map[types.Address]uint64) + w.keys = make(map[types.Address]types.PrivateKey) + w.lookahead = make(map[types.Address]uint64) + w.sces = make(map[types.Address]types.SiacoinElement) + w.sfes = make(map[types.Address]types.SiafundElement) + if err := w.reset(); err != nil { + return nil, modules.AddContext(err, "couldn't reset database before rescanning") + } + + go func() { + if err := w.tg.Add(); err != nil { + w.log.Error("couldn't start thread", zap.Error(err)) + return + } + defer w.tg.Done() + + fmt.Println("Wallet: waiting for the consensus to sync before scanning...") + for { + if w.synced() { + break + } + select { + case <-w.tg.StopChan(): + return + case <-time.After(5 * time.Second): + } + } + + copy(w.seed[:], entropy[:]) + dustThreshold := w.DustThreshold() + scanner := newSeedScanner(w.seed, dustThreshold) + if 
err := scanner.scan(cm, w.tg.StopChan()); err != nil { + w.log.Error("blockchain scan failed", zap.Error(err)) + return + } + + progress := scanner.largestIndexSeen + 1 + progress += progress / 10 + w.log.Info("blockchain scan finished", zap.Uint64("index", scanner.largestIndexSeen), zap.Uint64("progress", progress)) + w.generate(progress) + if err := w.saveSeed(progress); err != nil { + w.log.Error("couldn't save new seed", zap.Error(err)) + return + } + + w.subscribe() + }() + + return w, nil + } else { + go w.subscribe() + } + + return w, nil +} + +// synced returns true if we are synced with the blockchain. +func (w *Wallet) synced() bool { + lastBlockTimestamp := w.cm.TipState().PrevTimestamps[0] + return time.Since(lastBlockTimestamp) < 24*time.Hour && w.s.Synced() +} + +// threadedSaveWallet periodically saves the wallet state. +func (w *Wallet) threadedSaveWallet() { + err := w.tg.Add() + if err != nil { + return + } + defer w.tg.Done() - _, isSpendable := w.keys[unlockHash] - return isSpendable + for { + select { + case <-w.tg.StopChan(): + return + case <-time.After(2 * time.Minute): + } + w.mu.Lock() + if err := w.save(); err != nil { + w.log.Error("couldn't save wallet", zap.Error(err)) + } + w.mu.Unlock() + } } diff --git a/node/api/api.go b/node/api/api.go index 399ce73..1e592d4 100644 --- a/node/api/api.go +++ b/node/api/api.go @@ -1,195 +1,196 @@ package api import ( - "encoding/json" - "errors" - "fmt" - "log" - "net/http" - "strings" - "sync" - "github.com/mike76-dev/sia-satellite/modules" + "go.sia.tech/core/types" ) -const ( - // StatusModuleNotLoaded is a custom http code to indicate that a module - // wasn't yet loaded by the Daemon and can therefore not be reached. - StatusModuleNotLoaded = 490 -) +// DaemonVersion holds the version information for satd. 
+type DaemonVersion struct { + Version string `json:"version"` + GitRevision string `json:"gitRevision"` + BuildTime string `json:"buildTime"` +} -// ErrAPICallNotRecognized is returned by API client calls made to modules that -// are not yet loaded. -var ErrAPICallNotRecognized = errors.New("API call not recognized") - -// Error is a type that is encoded as JSON and returned in an API response in -// the event of an error. Only the Message field is required. More fields may -// be added to this struct in the future for better error reporting. -type Error struct { - // Message describes the error in English. Typically it is set to - // `err.Error()`. This field is required. - Message string `json:"message"` -} - -// Error implements the error interface for the Error type. It returns only the -// Message field. -func (err Error) Error() string { - return err.Message -} - -// HttpGET is a utility function for making http get requests with a -// whitelisted user-agent. A non-2xx response does not return an error. -func HttpGET(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", "Sat-Agent") - return http.DefaultClient.Do(req) -} - -// HttpGETAuthenticated is a utility function for making authenticated http get -// requests with a whitelisted user-agent and the supplied password. A -// non-2xx response does not return an error. -func HttpGETAuthenticated(url string, password string) (resp *http.Response, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", "Sat-Agent") - req.SetBasicAuth("", password) - return http.DefaultClient.Do(req) -} - -// HttpPOST is a utility function for making post requests with a -// whitelisted user-agent. A non-2xx response does not return an error. 
-func HttpPOST(url string, data string) (resp *http.Response, err error) { - req, err := http.NewRequest("POST", url, strings.NewReader(data)) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", "Sat-Agent") - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return http.DefaultClient.Do(req) -} - -// HttpPOSTAuthenticated is a utility function for making authenticated http -// post requests with a whitelisted user-agent and the supplied -// password. A non-2xx response does not return an error. -func HttpPOSTAuthenticated(url string, data string, password string) (resp *http.Response, err error) { - req, err := http.NewRequest("POST", url, strings.NewReader(data)) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", "Sat-Agent") - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.SetBasicAuth("", password) - return http.DefaultClient.Do(req) -} - -type ( - // API encapsulates a collection of modules and implements a http.Handler - // to access their methods. - API struct { - cs modules.ConsensusSet - gateway modules.Gateway - manager modules.Manager - portal modules.Portal - provider modules.Provider - tpool modules.TransactionPool - wallet modules.Wallet - - router http.Handler - routerMu sync.RWMutex - - requiredUserAgent string - requiredPassword string - modulesSet bool - Shutdown func() error - } -) +// SyncerPeer contains the information about a peer. +type SyncerPeer struct { + Address string `json:"address"` + Version string `json:"version"` + Inbound bool `json:"inbound"` +} + +// ConsensusTipResponse is the response type for /consensus/tip. +type ConsensusTipResponse struct { + Height uint64 `json:"height"` + BlockID types.BlockID `json:"id"` + Synced bool `json:"synced"` +} + +// TxpoolBroadcastRequest is the request type for /txpool/broadcast. 
+type TxpoolBroadcastRequest struct { + Transactions []types.Transaction `json:"transactions"` + V2Transactions []types.V2Transaction `json:"v2transactions"` +} + +// TxpoolTransactionsResponse is the response type for /txpool/transactions. +type TxpoolTransactionsResponse struct { + Transactions []types.Transaction `json:"transactions"` + V2Transactions []types.V2Transaction `json:"v2transactions"` +} + +// WalletBalanceResponse is the response type for /wallet/balance. +type WalletBalanceResponse struct { + Height uint64 `json:"height"` + Siacoins types.Currency `json:"siacoins"` + ImmatureSiacoins types.Currency `json:"immatureSiacoins"` + IncomingSiacoins types.Currency `json:"incomingSiacoins"` + OutgoingSiacoins types.Currency `json:"outgoingSiacoins"` + Siafunds uint64 `json:"siafunds"` + RecommendedFee types.Currency `json:"recommendedFee"` +} + +// WalletOutputsResponse is the response type for /wallet/outputs. +type WalletOutputsResponse struct { + SiacoinOutputs []types.SiacoinElement `json:"siacoinOutputs"` + SiafundOutputs []types.SiafundElement `json:"siafundOutputs"` +} + +// WalletSendRequest is the request type for /wallet/send. +type WalletSendRequest struct { + Amount types.Currency `json:"amount"` + Destination types.Address `json:"destination"` +} + +// ExchangeRate contains the exchange rate of a given currency. +type ExchangeRate struct { + Currency string `json:"currency"` + Rate float64 `json:"rate"` +} + +// HostAverages contains the host network averages. +type HostAverages struct { + modules.HostAverages + Rate float64 `json:"rate"` +} + +// Renter contains information about the renter. +type Renter struct { + Email string `json:"email"` + PublicKey types.PublicKey `json:"publickey"` +} + +// RentersGET contains the list of the renters. +type RentersGET struct { + Renters []Renter `json:"renters"` +} + +// RenterContract represents a contract formed by the renter. 
+type RenterContract struct { + // Amount of contract funds that have been spent on downloads. + DownloadSpending types.Currency `json:"downloadspending"` + // Block height that the file contract ends on. + EndHeight uint64 `json:"endheight"` + // Fees paid in order to form the file contract. + Fees types.Currency `json:"fees"` + // Amount of contract funds that have been spent on funding an ephemeral + // account on the host. + FundAccountSpending types.Currency `json:"fundaccountspending"` + // Public key of the renter that formed the contract. + RenterPublicKey types.PublicKey `json:"renterpublickey"` + // Public key of the host the contract was formed with. + HostPublicKey types.PublicKey `json:"hostpublickey"` + // HostVersion is the version of Sia that the host is running. + HostVersion string `json:"hostversion"` + // ID of the file contract. + ID types.FileContractID `json:"id"` + // A signed transaction containing the most recent contract revision. + LastTransaction types.Transaction `json:"lasttransaction"` + // Amount of contract funds that have been spent on maintenance tasks + // such as updating the price table or syncing the ephemeral account + // balance. + MaintenanceSpending modules.MaintenanceSpending `json:"maintenancespending"` + // Address of the host the file contract was formed with. + NetAddress string `json:"netaddress"` + // Remaining funds left to spend on uploads & downloads. + RenterFunds types.Currency `json:"renterfunds"` + // Size of the file contract, which is typically equal to the number of + // bytes that have been uploaded to the host. + Size uint64 `json:"size"` + // Block height that the file contract began on. + StartHeight uint64 `json:"startheight"` + // Amount of contract funds that have been spent on storage. + StorageSpending types.Currency `json:"storagespending"` + // Total cost to the wallet of forming the file contract. 
+ TotalCost types.Currency `json:"totalcost"` + // Amount of contract funds that have been spent on uploads. + UploadSpending types.Currency `json:"uploadspending"` + // Signals if contract is good for uploading data. + GoodForUpload bool `json:"goodforupload"` + // Signals if contract is good for a renewal. + GoodForRenew bool `json:"goodforrenew"` + // Signals if a contract has been marked as bad. + BadContract bool `json:"badcontract"` +} + +// RenterContracts contains the renter's contracts. +type RenterContracts struct { + ActiveContracts []RenterContract `json:"activecontracts"` + PassiveContracts []RenterContract `json:"passivecontracts"` + RefreshedContracts []RenterContract `json:"refreshedcontracts"` + DisabledContracts []RenterContract `json:"disabledcontracts"` + ExpiredContracts []RenterContract `json:"expiredcontracts"` + ExpiredRefreshedContracts []RenterContract `json:"expiredrefreshedcontracts"` +} + +// EmailPreferences contains the email preferences. +type EmailPreferences struct { + Email string `json:"email"` + WarnThreshold types.Currency `json:"threshold"` +} + +// ExtendedHostDBEntry is an extension to modules.HostDBEntry that includes +// the string representation of the public key. +type ExtendedHostDBEntry struct { + modules.HostDBEntry + PublicKeyString string `json:"publickeystring"` +} + +// HostdbHostsGET lists active hosts on the network. +type HostdbHostsGET struct { + Hosts []ExtendedHostDBEntry `json:"hosts"` +} + +// HostdbHostGET lists detailed statistics for a particular host, selected +// by pubkey. +type HostdbHostGET struct { + Entry ExtendedHostDBEntry `json:"entry"` + ScoreBreakdown modules.HostScoreBreakdown `json:"scorebreakdown"` +} + +// HostdbGET holds information about the hostdb. +type HostdbGET struct { + BlockHeight uint64 `json:"blockheight"` + InitialScanComplete bool `json:"initialscancomplete"` +} + +// HostdbFilterModeGET contains the information about the HostDB's +// filtermode. 
+type HostdbFilterModeGET struct {
+	FilterMode   string   `json:"filtermode"`
+	Hosts        []string `json:"hosts"`
+	NetAddresses []string `json:"netaddresses"`
+}
+
+// HostdbFilterModePOST contains the information needed to set the
+// FilterMode of the hostDB.
+type HostdbFilterModePOST struct {
+	FilterMode   string            `json:"filtermode"`
+	Hosts        []types.PublicKey `json:"hosts"`
+	NetAddresses []string          `json:"netaddresses"`
+}
 
-// ServeHTTP implements the http.Handler interface.
-func (api *API) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	api.routerMu.RLock()
-	api.router.ServeHTTP(w, r)
-	api.routerMu.RUnlock()
-}
-
-// SetModules allows for replacing the modules in the API at runtime.
-func (api *API) SetModules(g modules.Gateway, cs modules.ConsensusSet, m modules.Manager, portal modules.Portal, p modules.Provider, tp modules.TransactionPool, w modules.Wallet) {
-	if api.modulesSet {
-		log.Fatal("can't call SetModules more than once")
-	}
-	api.cs = cs
-	api.gateway = g
-	api.manager = m
-	api.portal = portal
-	api.provider = p
-	api.tpool = tp
-	api.wallet = w
-	api.modulesSet = true
-	api.buildHTTPRoutes()
-}
-
-// New creates a new API. The API will require authentication using HTTP basic
-// auth for certain endpoints if the supplied password is not the empty string.
-// Usernames are ignored for authentication.
-func New(requiredUserAgent string, requiredPassword string, g modules.Gateway, cs modules.ConsensusSet, m modules.Manager, portal modules.Portal, p modules.Provider, tp modules.TransactionPool, w modules.Wallet) *API {
-	api := &API{
-		cs:      cs,
-		gateway: g,
-		manager: m,
-		portal:  portal,
-		provider: p,
-		tpool:   tp,
-		wallet:  w,
-
-		requiredUserAgent: requiredUserAgent,
-		requiredPassword:  requiredPassword,
-	}
-
-	// Register API handlers
-	api.buildHTTPRoutes()
-
-	return api
-}
-
-// UnrecognizedCallHandler handles calls to not-loaded modules.
-func (api *API) UnrecognizedCallHandler(w http.ResponseWriter, _ *http.Request) { - var errStr string - errStr = fmt.Sprintf("%d Module not loaded", StatusModuleNotLoaded) - WriteError(w, Error{errStr}, StatusModuleNotLoaded) -} - -// WriteError writes an error to the API caller. -func WriteError(w http.ResponseWriter, err Error, code int) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(code) - encodingErr := json.NewEncoder(w).Encode(err) - if _, isJsonErr := encodingErr.(*json.SyntaxError); isJsonErr { - // Marshalling should only fail in the event of a developer error. - // Specifically, only non-marshallable types should cause an error here. - log.Fatal("failed to encode API error response:", encodingErr) - } -} - -// WriteJSON writes the object to the ResponseWriter. If the encoding fails, an -// error is written instead. The Content-Type of the response header is set -// accordingly. -func WriteJSON(w http.ResponseWriter, obj interface{}) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - err := json.NewEncoder(w).Encode(obj) - if _, isJsonErr := err.(*json.SyntaxError); isJsonErr { - // Marshalling should only fail in the event of a developer error. - // Specifically, only non-marshallable types should cause an error here. - log.Fatal("failed to encode API response:", err) - } -} - -// WriteSuccess writes the HTTP header with status 204 No Content to the -// ResponseWriter. WriteSuccess should only be used to indicate that the -// requested action succeeded AND there is no data to return. -func WriteSuccess(w http.ResponseWriter) { - w.WriteHeader(http.StatusNoContent) +// Announcement contains the information about a portal announcement. 
+type Announcement struct { + Text string `json:"text"` + Expires uint64 `json:"expires"` } diff --git a/node/api/client/client.go b/node/api/client/client.go index c60d191..e28a7dc 100644 --- a/node/api/client/client.go +++ b/node/api/client/client.go @@ -1,312 +1,81 @@ package client import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "strings" - - "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/node/api" + "go.sia.tech/core/consensus" + "go.sia.tech/core/types" + "go.sia.tech/jape" ) -type ( - // A Client makes requests to the satd HTTP API. - Client struct { - Options - } - - // Options defines the options that are available when creating a - // client. - Options struct { - // Address is the API address of the satd server. - Address string - - // Password must match the password of the satd server. - Password string - - // UserAgent must match the User-Agent required by the satd server. If not - // set, it defaults to "Sat-Agent". - UserAgent string - - // CheckRedirect is an optional handler to be called if the request - // receives a redirect status code. - // For more see https://golang.org/pkg/net/http/#Client - CheckRedirect func(req *http.Request, via []*http.Request) error - } -) - -// New creates a new Client using the provided address. The password will be set -// using SATD_API_PASSWORD environment variable and the user agent will be set -// to "Sat-Agent". Both can be changed manually by the caller after the client -// is returned. -func New(opts Options) *Client { - return &Client{ - Options: opts, - } +// A Client provides methods for interacting with the API server. +type Client struct { + c jape.Client } -// DefaultOptions returns the default options for a client. This includes -// setting the default satd user agent to "Sat-Agent" and setting the password -// using SATD_API_PASSWORD environment variable. 
-func DefaultOptions() (Options, error) { - pwd := os.Getenv("SATD_API_PASSWORD") - if pwd == "" { - return Options{}, errors.New("Could not locate api password") - } - return Options{ - Address: "localhost:9990", - Password: pwd, - UserAgent: "Sat-Agent", - }, nil +// DaemonVersion returns the current version of satd. +func (c *Client) DaemonVersion() (resp api.DaemonVersion, err error) { + err = c.c.GET("/daemon/version", &resp) + return } -// NewRequest constructs a request to the satd HTTP API, setting the correct -// User-Agent and Basic Auth. The resource path must begin with /. -func (c *Client) NewRequest(method, resource string, body io.Reader) (*http.Request, error) { - url := "http://" + c.Address + resource - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - agent := c.UserAgent - if agent == "" { - agent = "Sat-Agent" - } - req.Header.Set("User-Agent", agent) - if c.Password != "" { - req.SetBasicAuth("", c.Password) - } - return req, nil +// SyncerPeers returns the current peers of the syncer. +func (c *Client) SyncerPeers() (resp []api.SyncerPeer, err error) { + err = c.c.GET("/syncer/peers", &resp) + return } -// drainAndClose reads rc until EOF and then closes it. drainAndClose should -// always be called on HTTP response bodies, because if the body is not fully -// read, the underlying connection can't be reused. -func drainAndClose(rc io.ReadCloser) { - io.Copy(ioutil.Discard, rc) - rc.Close() +// SyncerConnect adds the address as a peer of the syncer. +func (c *Client) SyncerConnect(addr string) (err error) { + err = c.c.POST("/syncer/connect", addr, nil) + return } -// readAPIError decodes and returns an api.Error. 
-func readAPIError(r io.Reader) error { - var apiErr api.Error - b, _ := ioutil.ReadAll(r) - if err := json.NewDecoder(bytes.NewReader(b)).Decode(&apiErr); err != nil { - fmt.Println("raw resp", string(b)) - return modules.AddContext(err, "could not read error response") - } - - if modules.ContainsError(apiErr, ErrPeerExists) { - return ErrPeerExists - } - - return apiErr +// SyncerBroadcastBlock broadcasts a block to all peers. +func (c *Client) SyncerBroadcastBlock(b types.Block) (err error) { + err = c.c.POST("/syncer/broadcast/block", b, nil) + return } -// getRawResponse requests the specified resource. The response, if provided, -// will be returned in a byte slice. -func (c *Client) getRawResponse(resource string) (http.Header, []byte, error) { - header, reader, err := c.getReaderResponse(resource) - if err != nil { - return nil, nil, modules.AddContext(err, "failed to get reader response") - } - // Possible to get a nil reader if there is no response. - if reader == nil { - return header, nil, nil - } - defer drainAndClose(reader) - d, err := ioutil.ReadAll(reader) - return header, d, modules.AddContext(err, "failed to read all bytes from reader") +// ConsensusNetwork returns the node's network metadata. +func (c *Client) ConsensusNetwork() (resp *consensus.Network, err error) { + resp = new(consensus.Network) + err = c.c.GET("/consensus/network", resp) + return } -// getReaderResponse requests the specified resource. The response, if provided, -// will be returned as an io.Reader. 
-func (c *Client) getReaderResponse(resource string) (http.Header, io.ReadCloser, error) { - req, err := c.NewRequest("GET", resource, nil) - if err != nil { - return nil, nil, modules.AddContext(err, "failed to construct GET request") - } - httpClient := http.Client{CheckRedirect: c.CheckRedirect} - res, err := httpClient.Do(req) - if err != nil { - return nil, nil, modules.AddContext(err, "GET request failed") - } - - // Add ErrAPICallNotRecognized if StatusCode is StatusModuleNotLoaded to - // allow for handling of modules that are not loaded. - if res.StatusCode == api.StatusModuleNotLoaded { - err = modules.ComposeErrors(readAPIError(res.Body), api.ErrAPICallNotRecognized) - return nil, nil, fmt.Errorf("unable to perform GET on %s: %s", resource, err) - } - - // If the status code is not 2xx, decode and return the accompanying - // api.Error. - if res.StatusCode < 200 || res.StatusCode > 299 { - err := readAPIError(res.Body) - drainAndClose(res.Body) - return nil, nil, modules.AddContext(err, "GET request error") - } - - if res.StatusCode == http.StatusNoContent { - // No reason to read the response. - drainAndClose(res.Body) - return res.Header, nil, nil - } - return res.Header, res.Body, nil +// ConsensusTip returns the current tip index. +func (c *Client) ConsensusTip() (resp api.ConsensusTipResponse, err error) { + err = c.c.GET("/consensus/tip", &resp) + return } -// getRawResponse requests part of the specified resource. The response, if -// provided, will be returned in a byte slice. 
-func (c *Client) getRawPartialResponse(resource string, from, to uint64) ([]byte, error) { - req, err := c.NewRequest("GET", resource, nil) - if err != nil { - return nil, modules.AddContext(err, "failed to construct GET request") - } - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", from, to - 1)) - - httpClient := http.Client{CheckRedirect: c.CheckRedirect} - res, err := httpClient.Do(req) - if err != nil { - return nil, modules.AddContext(err, "GET request failed") - } - defer drainAndClose(res.Body) - - // Add ErrAPICallNotRecognized if StatusCode is StatusModuleNotLoaded to allow for - // handling of modules that are not loaded. - if res.StatusCode == api.StatusModuleNotLoaded { - err = modules.ComposeErrors(readAPIError(res.Body), api.ErrAPICallNotRecognized) - return nil, fmt.Errorf("unable to perform GET on %s: %s", resource, err) - } - - // If the status code is not 2xx, decode and return the accompanying - // api.Error. - if res.StatusCode < 200 || res.StatusCode > 299 { - return nil, modules.AddContext(readAPIError(res.Body), "GET request error") - } - - if res.StatusCode == http.StatusNoContent { - // No reason to read the response. - return []byte{}, nil - } - return ioutil.ReadAll(res.Body) -} - -// get requests the specified resource. The response, if provided, will be -// decoded into obj. The resource path must begin with /. -func (c *Client) get(resource string, obj interface{}) error { - // Request resource. - _, data, err := c.getRawResponse(resource) - if err != nil { - return err - } - if obj == nil { - // No need to decode response. - return nil - } - - // Decode response. - buf := bytes.NewBuffer(data) - err = json.NewDecoder(buf).Decode(obj) - if err != nil { - return modules.AddContext(err, "could not read response") - } - return nil +// ConsensusTipState returns the current tip state. 
+func (c *Client) ConsensusTipState() (resp consensus.State, err error) { + err = c.c.GET("/consensus/tipstate", &resp) + return } -// head makes a HEAD request to the resource at `resource`. The headers that are -// returned are the headers that would be returned if requesting the same -// `resource` using a GET request. -func (c *Client) head(resource string) (int, http.Header, error) { - req, err := c.NewRequest("HEAD", resource, nil) - if err != nil { - return 0, nil, modules.AddContext(err, "failed to construct HEAD request") - } - httpClient := http.Client{CheckRedirect: c.CheckRedirect} - res, err := httpClient.Do(req) - if err != nil { - return 0, nil, modules.AddContext(err, "HEAD request failed") - } - return res.StatusCode, res.Header, nil +// TxpoolTransactions returns all transactions in the transaction pool. +func (c *Client) TxpoolTransactions() (txns []types.Transaction, v2txns []types.V2Transaction, err error) { + var resp api.TxpoolTransactionsResponse + err = c.c.GET("/txpool/transactions", &resp) + return resp.Transactions, resp.V2Transactions, err } -// postRawResponse requests the specified resource. The response, if provided, -// will be returned in a byte slice -func (c *Client) postRawResponse(resource string, body io.Reader) (http.Header, []byte, error) { - // Default the Content-Type header to "application/x-www-form-urlencoded", - // if the caller is performing a multipart form-data upload they can do so by - // using `postRawResponseWithHeaders` and manually set the Content-Type - // header themselves. - headers := http.Header{"Content-Type": []string{"application/x-www-form-urlencoded"}} - return c.postRawResponseWithHeaders(resource, body, headers) +// TxpoolFee returns the recommended fee (per weight unit) to ensure a high +// probability of inclusion in the next block. 
+func (c *Client) TxpoolFee() (resp types.Currency, err error) { + err = c.c.GET("/txpool/fee", &resp) + return } -// postRawResponseWithHeaders requests the specified resource and allows to pass -// custom headers. The response, if provided, will be returned in a byte slice. -func (c *Client) postRawResponseWithHeaders(resource string, body io.Reader, headers http.Header) (http.Header, []byte, error) { - req, err := c.NewRequest("POST", resource, body) - if err != nil { - return http.Header{}, nil, modules.AddContext(err, "failed to construct POST request") - } - - // Decorate the headers on the request object. - for k, v := range headers { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - httpClient := http.Client{CheckRedirect: c.CheckRedirect} - res, err := httpClient.Do(req) - if err != nil { - return http.Header{}, nil, modules.AddContext(err, "POST request failed") - } - defer drainAndClose(res.Body) - - // Add ErrAPICallNotRecognized if StatusCode is StatusModuleNotLoaded to allow for - // handling of modules that are not loaded. - if res.StatusCode == api.StatusModuleNotLoaded { - err = modules.ComposeErrors(readAPIError(res.Body), api.ErrAPICallNotRecognized) - return http.Header{}, nil, fmt.Errorf("unable to perform POST on %s: %s", resource, err) - } - - // If the status code is not 2xx, decode and return the accompanying - // api.Error. - if res.StatusCode < 200 || res.StatusCode > 299 { - return http.Header{}, nil, modules.AddContext(readAPIError(res.Body), "POST request error") - } - - if res.StatusCode == http.StatusNoContent { - // No reason to read the response. - return res.Header, []byte{}, nil - } - d, err := ioutil.ReadAll(res.Body) - return res.Header, d, err +// NewClient returns a client that communicates with the API server listening +// on the specified address. +func NewClient() *Client { + return &Client{} } -// post makes a POST request to the resource at `resource`, using `data` as the -// request body. 
The response, if provided, will be decoded into `obj`. -func (c *Client) post(resource string, data string, obj interface{}) error { - // Request resource. - _, body, err := c.postRawResponse(resource, strings.NewReader(data)) - if err != nil { - return err - } - if obj == nil { - // No need to decode response. - return nil - } - - // Decode response. - buf := bytes.NewBuffer(body) - err = json.NewDecoder(buf).Decode(obj) - if err != nil { - return modules.AddContext(err, "could not read response") - } - return nil +// Client returns the underlying jape.Client. +func (c *Client) Client() *jape.Client { + return &c.c } diff --git a/node/api/client/consensus.go b/node/api/client/consensus.go deleted file mode 100644 index 906713a..0000000 --- a/node/api/client/consensus.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/mike76-dev/sia-satellite/node/api" - - "go.sia.tech/core/types" -) - -// ConsensusGet requests the /consensus api resource. -func (c *Client) ConsensusGet() (cg api.ConsensusGET, err error) { - err = c.get("/consensus", &cg) - return -} - -// ConsensusBlocksIDGet requests the /consensus/blocks api resource. -func (c *Client) ConsensusBlocksIDGet(id types.BlockID) (cbg api.ConsensusBlocksGet, err error) { - err = c.get("/consensus/blocks?id=" + id.String(), &cbg) - return -} - -// ConsensusBlocksHeightGet requests the /consensus/blocks api resource. -func (c *Client) ConsensusBlocksHeightGet(height uint64) (cbg api.ConsensusBlocksGet, err error) { - err = c.get("/consensus/blocks?height=" + fmt.Sprint(height), &cbg) - return -} diff --git a/node/api/client/daemon.go b/node/api/client/daemon.go deleted file mode 100644 index 3fd5e73..0000000 --- a/node/api/client/daemon.go +++ /dev/null @@ -1,23 +0,0 @@ -package client - -import ( - "github.com/mike76-dev/sia-satellite/node/api" -) - -// DaemonAlertsGet requests the /daemon/alerts resource. 
-func (c *Client) DaemonAlertsGet() (dag api.DaemonAlertsGet, err error) { - err = c.get("/daemon/alerts", &dag) - return -} - -// DaemonVersionGet requests the /daemon/version resource. -func (c *Client) DaemonVersionGet() (dvg api.DaemonVersionGet, err error) { - err = c.get("/daemon/version", &dvg) - return -} - -// DaemonStopGet stops the daemon using the /daemon/stop endpoint. -func (c *Client) DaemonStopGet() (err error) { - err = c.get("/daemon/stop", nil) - return -} diff --git a/node/api/client/gateway.go b/node/api/client/gateway.go deleted file mode 100644 index cd85595..0000000 --- a/node/api/client/gateway.go +++ /dev/null @@ -1,91 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/node/api" -) - -var ( - // ErrPeerExists indicates that two peers are already connected. The string - // of this error needs to be updated if the string of errPeerExists in the - // gateway package is changed. - ErrPeerExists = errors.New("already connected to this peer") -) - -// GatewayConnectPost uses the /gateway/connect/:address endpoint to connect to -// the gateway at address. -func (c *Client) GatewayConnectPost(address modules.NetAddress) (err error) { - err = c.post("/gateway/connect/" + string(address), "", nil) - if err != nil && modules.ContainsError(err, ErrPeerExists) { - err = ErrPeerExists - } - return -} - -// GatewayDisconnectPost uses the /gateway/disconnect/:address endpoint to -// disconnect the gateway from a peer. -func (c *Client) GatewayDisconnectPost(address modules.NetAddress) (err error) { - err = c.post("/gateway/disconnect/" + string(address), "", nil) - return -} - -// GatewayGet requests the /gateway api resource. -func (c *Client) GatewayGet() (gwg api.GatewayGET, err error) { - err = c.get("/gateway", &gwg) - return -} - -// GatewayBlocklistGet uses the /gateway/blocklist endpoint to request the -// Gateway's blocklist. 
-func (c *Client) GatewayBlocklistGet() (gbg api.GatewayBlocklistGET, err error) { - err = c.get("/gateway/blocklist", &gbg) - return -} - -// GatewayAppendBlocklistPost uses the /gateway/blocklist endpoint to append -// addresses to the Gateway's blocklist. -func (c *Client) GatewayAppendBlocklistPost(addresses []string) (err error) { - gbp := api.GatewayBlocklistPOST{ - Action: "append", - Addresses: addresses, - } - data, err := json.Marshal(gbp) - if err != nil { - return err - } - err = c.post("/gateway/blocklist", string(data), nil) - return -} - -// GatewayRemoveBlocklistPost uses the /gateway/blocklist endpoint to remove -// addresses from the Gateway's blocklist. -func (c *Client) GatewayRemoveBlocklistPost(addresses []string) (err error) { - gbp := api.GatewayBlocklistPOST{ - Action: "remove", - Addresses: addresses, - } - data, err := json.Marshal(gbp) - if err != nil { - return err - } - err = c.post("/gateway/blocklist", string(data), nil) - return -} - -// GatewaySetBlocklistPost uses the /gateway/blocklist endpoint to set the -// Gateway's blocklist. -func (c *Client) GatewaySetBlocklistPost(addresses []string) (err error) { - gbp := api.GatewayBlocklistPOST{ - Action: "set", - Addresses: addresses, - } - data, err := json.Marshal(gbp) - if err != nil { - return err - } - err = c.post("/gateway/blocklist", string(data), nil) - return -} diff --git a/node/api/client/hostdb.go b/node/api/client/hostdb.go index dcc5a17..22e9f82 100644 --- a/node/api/client/hostdb.go +++ b/node/api/client/hostdb.go @@ -1,40 +1,38 @@ package client import ( - "encoding/json" - - "github.com/mike76-dev/sia-satellite/node/api" "github.com/mike76-dev/sia-satellite/modules" + "github.com/mike76-dev/sia-satellite/node/api" "go.sia.tech/core/types" ) -// HostDbGet requests the /hostdb endpoint's resources. -func (c *Client) HostDbGet() (hdg api.HostdbGet, err error) { - err = c.get("/hostdb", &hdg) +// HostDb requests the /hostdb endpoint's resources. 
+func (c *Client) HostDb() (hdg api.HostdbGET, err error) { + err = c.c.GET("/hostdb", &hdg) return } -// HostDbActiveGet requests the /hostdb/active endpoint's resources. -func (c *Client) HostDbActiveGet() (hdag api.HostdbActiveGET, err error) { - err = c.get("/hostdb/active", &hdag) +// HostDbActiveHosts requests the /hostdb/active endpoint's resources. +func (c *Client) HostDbActiveHosts() (hdag api.HostdbHostsGET, err error) { + err = c.c.GET("/hostdb/active", &hdag) return } -// HostDbAllGet requests the /hostdb/all endpoint's resources. -func (c *Client) HostDbAllGet() (hdag api.HostdbAllGET, err error) { - err = c.get("/hostdb/all", &hdag) +// HostDbAllHosts requests the /hostdb/all endpoint's resources. +func (c *Client) HostDbAllHosts() (hdag api.HostdbHostsGET, err error) { + err = c.c.GET("/hostdb/all", &hdag) return } -// HostDbFilterModeGet requests the /hostdb/filtermode GET endpoint. -func (c *Client) HostDbFilterModeGet() (hdfmg api.HostdbFilterModeGET, err error) { - err = c.get("/hostdb/filtermode", &hdfmg) +// HostDbFilterMode requests the /hostdb/filtermode GET endpoint. +func (c *Client) HostDbFilterMode() (hdfmg api.HostdbFilterModeGET, err error) { + err = c.c.GET("/hostdb/filtermode", &hdfmg) return } -// HostDbFilterModePost requests the /hostdb/filtermode POST endpoint. -func (c *Client) HostDbFilterModePost(fm modules.FilterMode, hosts []types.PublicKey, netAddresses []string) (err error) { +// HostDbSetFilterMode requests the /hostdb/filtermode POST endpoint. 
+func (c *Client) HostDbSetFilterMode(fm modules.FilterMode, hosts []types.PublicKey, netAddresses []string) (err error) { filterMode := fm.String() hdblp := api.HostdbFilterModePOST{ FilterMode: filterMode, @@ -42,16 +40,11 @@ func (c *Client) HostDbFilterModePost(fm modules.FilterMode, hosts []types.Publi NetAddresses: netAddresses, } - data, err := json.Marshal(hdblp) - if err != nil { - return err - } - err = c.post("/hostdb/FilterMode", string(data), nil) - return + return c.c.POST("/hostdb/filtermode", &hdblp, nil) } -// HostDbHostsGet request the /hostdb/hosts/:pubkey endpoint's resources. -func (c *Client) HostDbHostsGet(pk types.PublicKey) (hhg api.HostdbHostsGET, err error) { - err = c.get("/hostdb/hosts/" + pk.String(), &hhg) +// HostDbHost request the /hostdb/host/:publickey endpoint's resources. +func (c *Client) HostDbHost(pk types.PublicKey) (hhg api.HostdbHostGET, err error) { + err = c.c.GET("/hostdb/host/"+pk.String(), &hhg) return } diff --git a/node/api/client/manager.go b/node/api/client/manager.go index d558d1f..e58dc7d 100644 --- a/node/api/client/manager.go +++ b/node/api/client/manager.go @@ -1,105 +1,81 @@ package client import ( - "encoding/json" - "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/node/api" ) -// ManagerAveragesGet requests the /manager/averages resource. -func (c *Client) ManagerAveragesGet(currency string) (ha api.HostAverages, err error) { - url := "/manager/averages/" + currency - err = c.get(url, &ha) +// ManagerAverages requests the /manager/averages resource. +func (c *Client) ManagerAverages(currency string) (ha api.HostAverages, err error) { + err = c.c.GET("/manager/averages/"+currency, &ha) return } -// ManagerContractsGet requests the /manager/contracts resource. 
-func (c *Client) ManagerContractsGet(key string) (rc api.RenterContracts, err error) { - url := "/manager/contracts" - if key != "" { - url = url + "/" + key - } - err = c.get(url, &rc) +// ManagerContracts requests the /manager/contracts resource. +func (c *Client) ManagerContracts(key string) (rc api.RenterContracts, err error) { + err = c.c.GET("/manager/contracts/"+key, &rc) return } -// ManagerRenterGet requests the /manager/renter resource. -func (c *Client) ManagerRenterGet(key string) (r modules.Renter, err error) { - url := "/manager/renter/" + key - err = c.get(url, &r) +// ManagerRenter requests the /manager/renter resource. +func (c *Client) ManagerRenter(key string) (r modules.Renter, err error) { + err = c.c.GET("/manager/renter/"+key, &r) return } -// ManagerBalanceGet requests the /manager/balance resource. -func (c *Client) ManagerBalanceGet(key string) (ub modules.UserBalance, err error) { - url := "/manager/balance/" + key - err = c.get(url, &ub) +// ManagerBalance requests the /manager/balance resource. +func (c *Client) ManagerBalance(key string) (ub modules.UserBalance, err error) { + err = c.c.GET("/manager/balance/"+key, &ub) return } -// ManagerRentersGet requests the /manager/renters resource. -func (c *Client) ManagerRentersGet() (rg api.RentersGET, err error) { - err = c.get("/manager/renters", &rg) +// ManagerRenters requests the /manager/renters resource. +func (c *Client) ManagerRenters() (rg api.RentersGET, err error) { + err = c.c.GET("/manager/renters", &rg) return } -// ManagerPreferencesGet requests the /manager/preferences resource. -func (c *Client) ManagerPreferencesGet() (ep api.EmailPreferences, err error) { - err = c.get("/manager/preferences", &ep) +// ManagerPreferences requests the /manager/preferences resource. 
+func (c *Client) ManagerPreferences() (ep api.EmailPreferences, err error) { + err = c.c.GET("/manager/preferences", &ep) return } -// ManagerPreferencesPost uses the /manager/preferences resource to change +// ManagerUpdatePreferences uses the /manager/preferences resource to change // the email preferences. -func (c *Client) ManagerPreferencesPost(ep api.EmailPreferences) error { - json, err := json.Marshal(ep) - if err != nil { - return err - } - err = c.post("/manager/preferences", string(json), nil) - return err +func (c *Client) ManagerUpdatePreferences(ep api.EmailPreferences) error { + return c.c.POST("/manager/preferences", &ep, nil) } -// ManagerPricesGet requests the /manager/prices resource. -func (c *Client) ManagerPricesGet() (prices modules.Pricing, err error) { - err = c.get("/manager/prices", &prices) +// ManagerPrices requests the /manager/prices resource. +func (c *Client) ManagerPrices() (prices modules.Pricing, err error) { + err = c.c.GET("/manager/prices", &prices) return } -// ManagerPricesPost uses the /manager/prices resource to change +// ManagerUpdatePrices uses the /manager/prices resource to change // the current prices. -func (c *Client) ManagerPricesPost(prices modules.Pricing) error { - json, err := json.Marshal(prices) - if err != nil { - return err - } - err = c.post("/manager/prices", string(json), nil) - return err +func (c *Client) ManagerUpdatePrices(prices modules.Pricing) error { + return c.c.POST("/manager/prices", &prices, nil) } -// ManagerMaintenanceGet requests the /manager/maintenance resource. -func (c *Client) ManagerMaintenanceGet() (maintenance bool, err error) { +// ManagerMaintenance requests the /manager/maintenance resource. 
+func (c *Client) ManagerMaintenance() (maintenance bool, err error) { var req struct { Maintenance bool `json:"maintenance"` } - err = c.get("/manager/maintenance", &req) + err = c.c.GET("/manager/maintenance", &req) if err != nil { return false, err } return req.Maintenance, nil } -// ManagerMaintenancePost uses the /manager/maintenance resource to set +// ManagerSetMaintenance uses the /manager/maintenance resource to set // or clear the maintenance flag. -func (c *Client) ManagerMaintenancePost(start bool) error { +func (c *Client) ManagerSetMaintenance(start bool) error { req := struct { Start bool `json:"start"` }{Start: start} - json, err := json.Marshal(req) - if err != nil { - return err - } - err = c.post("/manager/maintenance", string(json), nil) - return err + return c.c.POST("/manager/maintenance", &req, nil) } diff --git a/node/api/client/portal.go b/node/api/client/portal.go index 5ef79f4..183ab42 100644 --- a/node/api/client/portal.go +++ b/node/api/client/portal.go @@ -1,50 +1,36 @@ package client import ( - "encoding/json" - "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/node/api" ) -// PortalCreditsGet requests the /portal/credits resource. -func (c *Client) PortalCreditsGet() (credits modules.CreditData, err error) { - url := "/portal/credits" - err = c.get(url, &credits) +// PortalCredits requests the /portal/credits resource. +func (c *Client) PortalCredits() (credits modules.CreditData, err error) { + err = c.c.GET("/portal/credits", &credits) return } -// PortalCreditsPost requests the /portal/credits resource. -func (c *Client) PortalCreditsPost(credits modules.CreditData) (err error) { - data, err := json.Marshal(credits) - if err != nil { - return err - } - err = c.post("/portal/credits", string(data), nil) - return +// PortalSetCredits requests the /portal/credits resource. 
+func (c *Client) PortalSetCredits(credits modules.CreditData) (err error) { + return c.c.POST("/portal/credits", &credits, nil) } -// PortalAnnouncementGet requests the /portal/announcement resource. -func (c *Client) PortalAnnouncementGet() (string, uint64, error) { - url := "/portal/announcement" +// PortalAnnouncement requests the /portal/announcement resource. +func (c *Client) PortalAnnouncement() (string, uint64, error) { var req api.Announcement - err := c.get(url, &req) + err := c.c.GET("/portal/announcement", &req) if err != nil { return "", 0, err } return req.Text, req.Expires, nil } -// PortalAnnouncementPost requests the /portal/announcement resource. -func (c *Client) PortalAnnouncementPost(text string, expires uint64) (err error) { +// PortalSetAnnouncement requests the /portal/announcement resource. +func (c *Client) PortalSetAnnouncement(text string, expires uint64) (err error) { req := api.Announcement{ Text: text, Expires: expires, } - data, err := json.Marshal(req) - if err != nil { - return err - } - err = c.post("/portal/announcement", string(data), nil) - return + return c.c.POST("/portal/announcement", &req, nil) } diff --git a/node/api/client/transactionpool.go b/node/api/client/transactionpool.go deleted file mode 100644 index 4b661bb..0000000 --- a/node/api/client/transactionpool.go +++ /dev/null @@ -1,44 +0,0 @@ -package client - -import ( - "bytes" - "encoding/base64" - "net/url" - - "github.com/mike76-dev/sia-satellite/node/api" - - "go.sia.tech/core/types" -) - -// TransactionPoolFeeGet uses the /tpool/fee endpoint to get a fee estimation. -func (c *Client) TransactionPoolFeeGet() (tfg api.TpoolFeeGET, err error) { - err = c.get("/tpool/fee", &tfg) - return -} - -// TransactionPoolRawPost uses the /tpool/raw endpoint to send a raw -// transaction to the transaction pool. 
-func (c *Client) TransactionPoolRawPost(txn types.Transaction, parents []types.Transaction) (err error) { - var p, t bytes.Buffer - e := types.NewEncoder(&p) - e.WritePrefix(len(parents)) - for _, parent := range parents { - parent.EncodeTo(e) - } - e.Flush() - e = types.NewEncoder(&t) - txn.EncodeTo(e) - e.Flush() - values := url.Values{} - values.Set("transaction", base64.StdEncoding.EncodeToString(t.Bytes())) - values.Set("parents", base64.StdEncoding.EncodeToString(p.Bytes())) - err = c.post("/tpool/raw", values.Encode(), nil) - return -} - -// TransactionPoolTransactionsGet uses the /tpool/transactions endpoint to get the -// transactions of the tpool. -func (c *Client) TransactionPoolTransactionsGet() (tptg api.TpoolTxnsGET, err error) { - err = c.get("/tpool/transactions", &tptg) - return -} diff --git a/node/api/client/wallet.go b/node/api/client/wallet.go index 5f8215e..cf2c818 100644 --- a/node/api/client/wallet.go +++ b/node/api/client/wallet.go @@ -1,238 +1,67 @@ package client import ( - "encoding/json" "fmt" - "net/url" - "strconv" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/node/api" - "go.sia.tech/core/types" ) -// WalletAddressGet requests a new address from the /wallet/address endpoint. -func (c *Client) WalletAddressGet() (wag api.WalletAddressGET, err error) { - err = c.get("/wallet/address", &wag) - return -} - -// WalletAddressesGet requests the wallets known addresses from the -// /wallet/addresses endpoint. -func (c *Client) WalletAddressesGet() (wag api.WalletAddressesGET, err error) { - err = c.get("/wallet/addresses", &wag) - return -} - -// WalletChangePasswordPost uses the /wallet/changepassword endpoint to change -// the wallet's password. 
-func (c *Client) WalletChangePasswordPost(currentPassword, newPassword string) (err error) { - values := url.Values{} - values.Set("newpassword", newPassword) - values.Set("encryptionpassword", currentPassword) - err = c.post("/wallet/changepassword", values.Encode(), nil) - return -} - -// WalletChangePasswordWithSeedPost uses the /wallet/changepassword endpoint to -// change the password used to encrypt the wallet. -func (c *Client) WalletChangePasswordWithSeedPost(seed modules.Seed, newPassword string) (err error) { - seedStr := modules.EncodeBIP39Phrase(seed) - return c.WalletChangePasswordPost(seedStr, newPassword) -} - -// WalletVerifyPasswordGet uses the /wallet/verifypassword endpoint to check -// the wallet's password. -func (c *Client) WalletVerifyPasswordGet(password string) (wvpg api.WalletVerifyPasswordGET, err error) { - values := url.Values{} - values.Set("password", password) - err = c.get(fmt.Sprintf("/wallet/verifypassword?%s", values.Encode()), &wvpg) - return -} - -// WalletVerifyPasswordSeedGet takes a seed and generates a seed string to -// submit to the /wallet/verifypassword endpoint. -func (c *Client) WalletVerifyPasswordSeedGet(seed modules.Seed) (wvpg api.WalletVerifyPasswordGET, err error) { - seedStr := modules.EncodeBIP39Phrase(seed) - return c.WalletVerifyPasswordGet(seedStr) -} - -// WalletInitPost uses the /wallet/init endpoint to initialize and encrypt a -// wallet. -func (c *Client) WalletInitPost(password string, force bool) (wip api.WalletInitPOST, err error) { - values := url.Values{} - values.Set("encryptionpassword", password) - values.Set("force", strconv.FormatBool(force)) - err = c.post("/wallet/init", values.Encode(), &wip) - return -} - -// WalletInitSeedPost uses the /wallet/init/seed endpoint to initialize and -// encrypt a wallet using a given seed. 
-func (c *Client) WalletInitSeedPost(seed, password string, force bool) (err error) { - values := url.Values{} - values.Set("seed", seed) - values.Set("encryptionpassword", password) - values.Set("force", strconv.FormatBool(force)) - err = c.post("/wallet/init/seed", values.Encode(), nil) - return -} - -// WalletGet requests the /wallet api resource. -func (c *Client) WalletGet() (wg api.WalletGET, err error) { - err = c.get("/wallet", &wg) +// WalletAddress returns a newly-generated address. +func (c *Client) WalletAddress() (addr types.Address, err error) { + err = c.c.GET("/wallet/address", &addr) return } -// WalletLastAddressesGet returns the count last addresses generated by the -// wallet in reverse order. That means the last generated address will be the -// first one in the slice. -func (c *Client) WalletLastAddressesGet(count uint64) (wag api.WalletAddressesGET, err error) { - err = c.get(fmt.Sprintf("/wallet/seedaddrs?count=%v", count), &wag) +// WalletBalance returns the current wallet balance. +func (c *Client) WalletBalance() (resp api.WalletBalanceResponse, err error) { + err = c.c.GET("/wallet/balance", &resp) return } -// WalletLockPost uses the /wallet/lock endpoint to lock the wallet. -func (c *Client) WalletLockPost() (err error) { - err = c.post("/wallet/lock", "", nil) +// WalletPoolTransactions returns all txpool transactions relevant to the wallet. +func (c *Client) WalletPoolTransactions() (resp []modules.PoolTransaction, err error) { + err = c.c.GET("/wallet/txpool", &resp) return } -// WalletSeedPost uses the /wallet/seed endpoint to add a seed to the wallet's list -// of seeds. -func (c *Client) WalletSeedPost(seed, password string) (err error) { - values := url.Values{} - values.Set("seed", seed) - values.Set("encryptionpassword", password) - err = c.post("/wallet/seed", values.Encode(), nil) - return -} - -// WalletSeedsGet uses the /wallet/seeds endpoint to return the wallet's -// current seeds. 
-func (c *Client) WalletSeedsGet() (wsg api.WalletSeedsGET, err error) { - err = c.get("/wallet/seeds", &wsg) - return -} - -// WalletSiacoinsMultiPost uses the /wallet/siacoin api endpoint to send money -// to multiple addresses at once. -func (c *Client) WalletSiacoinsMultiPost(outputs []types.SiacoinOutput) (wsp api.WalletSiacoinsPOST, err error) { - values := url.Values{} - marshaledOutputs, err := json.Marshal(outputs) - if err != nil { - return api.WalletSiacoinsPOST{}, err - } - values.Set("outputs", string(marshaledOutputs)) - err = c.post("/wallet/siacoins", values.Encode(), &wsp) - return -} - -// WalletSiacoinsPost uses the /wallet/siacoins api endpoint to send money to a -// single address. -func (c *Client) WalletSiacoinsPost(amount types.Currency, destination types.Address, feeIncluded bool) (wsp api.WalletSiacoinsPOST, err error) { - values := url.Values{} - values.Set("amount", amount.ExactString()) - values.Set("destination", destination.String()) - values.Set("feeIncluded", strconv.FormatBool(feeIncluded)) - err = c.post("/wallet/siacoins", values.Encode(), &wsp) - return +// WalletOutputs returns the set of unspent outputs controlled by the wallet. +func (c *Client) WalletOutputs() (sc []types.SiacoinElement, sf []types.SiafundElement, err error) { + var resp api.WalletOutputsResponse + err = c.c.GET("/wallet/outputs", &resp) + return resp.SiacoinOutputs, resp.SiafundOutputs, err } -// WalletSignPost uses the /wallet/sign api endpoint to sign a transaction. -func (c *Client) WalletSignPost(txn types.Transaction, toSign []types.Hash256) (wspr api.WalletSignPOSTResp, err error) { - json, err := json.Marshal(api.WalletSignPOSTParams{ - Transaction: txn, - ToSign: toSign, - }) - if err != nil { - return - } - err = c.post("/wallet/sign", string(json), &wspr) +// WalletAddresses returns the addresses controlled by the wallet. 
+func (c *Client) WalletAddresses() (addrs []types.Address, err error) { + err = c.c.GET("/wallet/addresses", &addrs) return } -// WalletSweepPost uses the /wallet/sweep/seed endpoint to sweep a seed into -// the current wallet. -func (c *Client) WalletSweepPost(seed string) (wsp api.WalletSweepPOST, err error) { - values := url.Values{} - values.Set("seed", seed) - err = c.post("/wallet/sweep/seed", values.Encode(), &wsp) +// WalletAddWatch adds the specified watch address. +func (c *Client) WalletAddWatch(addr types.Address) (err error) { + err = c.c.PUT(fmt.Sprintf("/wallet/watch/%v", addr), nil) return } -// WalletTransactionsGet requests the/wallet/transactions api resource for a -// certain startheight and endheight. -func (c *Client) WalletTransactionsGet(startHeight uint64, endHeight uint64) (wtg api.WalletTransactionsGET, err error) { - err = c.get(fmt.Sprintf("/wallet/transactions?startheight=%v&endheight=%v", - startHeight, endHeight), &wtg) +// WalletRemoveWatch removes the specified watch address. +func (c *Client) WalletRemoveWatch(addr types.Address) (err error) { + err = c.c.DELETE(fmt.Sprintf("/wallet/watch/%v", addr)) return } -// WalletTransactionGet requests the /wallet/transaction/:id api resource for a -// certain TransactionID. -func (c *Client) WalletTransactionGet(id types.TransactionID) (wtg api.WalletTransactionGETid, err error) { - err = c.get("/wallet/transaction/" + id.String(), &wtg) +// WalletWatchedAddresses returns a list of the watched addresses. +func (c *Client) WalletWatchedAddresses() (addrs []types.Address, err error) { + err = c.c.GET("/wallet/watch", &addrs) return } -// WalletUnlockPost uses the /wallet/unlock endpoint to unlock the wallet with -// a given encryption key. Per default this key is the seed. 
-func (c *Client) WalletUnlockPost(password string) (err error) { - values := url.Values{} - values.Set("encryptionpassword", password) - err = c.post("/wallet/unlock", values.Encode(), nil) +// WalletSendSiacoins sends a specified amount of SC to the specified address. +func (c *Client) WalletSendSiacoins(amount types.Currency, dest types.Address) (err error) { + err = c.c.POST("/wallet/send", api.WalletSendRequest{ + Amount: amount, + Destination: dest, + }, nil) return } - -// WalletUnlockConditionsGet requests the /wallet/unlockconditions endpoint -// and returns the UnlockConditions of addr. -func (c *Client) WalletUnlockConditionsGet(addr types.Address) (wucg api.WalletUnlockConditionsGET, err error) { - err = c.get("/wallet/unlockconditions/" + addr.String(), &wucg) - return -} - -// WalletUnspentGet requests the /wallet/unspent endpoint and returns all of -// the unspent outputs related to the wallet. -func (c *Client) WalletUnspentGet() (wug api.WalletUnspentGET, err error) { - err = c.get("/wallet/unspent", &wug) - return -} - -// WalletWatchGet requests the /wallet/watch endpoint and returns the set of -// currently watched addresses. -func (c *Client) WalletWatchGet() (wwg api.WalletWatchGET, err error) { - err = c.get("/wallet/watch", &wwg) - return -} - -// WalletWatchAddPost uses the /wallet/watch endpoint to add a set of addresses -// to the watch set. The unused flag should be set to true if the addresses -// have never appeared in the blockchain. -func (c *Client) WalletWatchAddPost(addrs []types.Address, unused bool) error { - json, err := json.Marshal(api.WalletWatchPOST{ - Addresses: addrs, - Remove: false, - Unused: unused, - }) - if err != nil { - return err - } - return c.post("/wallet/watch", string(json), nil) -} - -// WalletWatchRemovePost uses the /wallet/watch endpoint to remove a set of -// addresses from the watch set. The unused flag should be set to true if the -// addresses have never appeared in the blockchain. 
-func (c *Client) WalletWatchRemovePost(addrs []types.Address, unused bool) error { - json, err := json.Marshal(api.WalletWatchPOST{ - Addresses: addrs, - Remove: true, - Unused: unused, - }) - if err != nil { - return err - } - return c.post("/wallet/watch", string(json), nil) -} diff --git a/node/api/consensus.go b/node/api/consensus.go deleted file mode 100644 index 5b4a48c..0000000 --- a/node/api/consensus.go +++ /dev/null @@ -1,270 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -// ConsensusGET contains general information about the consensus set, with tags -// to support idiomatic json encodings. -type ConsensusGET struct { - Synced bool `json:"synced"` - Height uint64 `json:"height"` - CurrentBlock types.BlockID `json:"currentblock"` - Target modules.Target `json:"target"` - Difficulty types.Currency `json:"difficulty"` -} - -// ConsensusHeadersGET contains information from a blocks header. -type ConsensusHeadersGET struct { - BlockID types.BlockID `json:"blockid"` -} - -// ConsensusBlocksGet contains all fields of a types.Block and additional -// fields for ID and Height. -type ConsensusBlocksGet struct { - ID types.BlockID `json:"id"` - Height uint64 `json:"height"` - ParentID types.BlockID `json:"parentid"` - Nonce uint64 `json:"nonce"` - Difficulty types.Currency `json:"difficulty"` - Timestamp time.Time `json:"timestamp"` - MinerPayouts []types.SiacoinOutput `json:"minerpayouts"` - Transactions []ConsensusBlocksGetTxn `json:"transactions"` -} - -// ConsensusBlocksGetTxn contains all fields of a types.Transaction and an -// additional ID field. 
-type ConsensusBlocksGetTxn struct { - ID types.TransactionID `json:"id"` - SiacoinInputs []types.SiacoinInput `json:"siacoininputs"` - SiacoinOutputs []ConsensusBlocksGetSiacoinOutput `json:"siacoinoutputs"` - FileContracts []ConsensusBlocksGetFileContract `json:"filecontracts"` - FileContractRevisions []types.FileContractRevision `json:"filecontractrevisions"` - StorageProofs []types.StorageProof `json:"storageproofs"` - SiafundInputs []types.SiafundInput `json:"siafundinputs"` - SiafundOutputs []ConsensusBlocksGetSiafundOutput `json:"siafundoutputs"` - MinerFees []types.Currency `json:"minerfees"` - ArbitraryData [][]byte `json:"arbitrarydata"` - TransactionSignatures []types.TransactionSignature `json:"transactionsignatures"` -} - -// ConsensusBlocksGetFileContract contains all fields of a types.FileContract -// and an additional ID field. -type ConsensusBlocksGetFileContract struct { - ID types.FileContractID `json:"id"` - Filesize uint64 `json:"filesize"` - FileMerkleRoot types.Hash256 `json:"filemerkleroot"` - WindowStart uint64 `json:"windowstart"` - WindowEnd uint64 `json:"windowend"` - Payout types.Currency `json:"payout"` - ValidProofOutputs []ConsensusBlocksGetSiacoinOutput `json:"validproofoutputs"` - MissedProofOutputs []ConsensusBlocksGetSiacoinOutput `json:"missedproofoutputs"` - UnlockHash types.Hash256 `json:"unlockhash"` - RevisionNumber uint64 `json:"revisionnumber"` -} - -// ConsensusBlocksGetSiacoinOutput contains all fields of a types.SiacoinOutput -// and an additional ID field. -type ConsensusBlocksGetSiacoinOutput struct { - ID types.SiacoinOutputID `json:"id"` - Value types.Currency `json:"value"` - Address types.Address `json:"unlockhash"` -} - -// ConsensusBlocksGetSiafundOutput contains all fields of a types.SiafundOutput -// and an additional ID field. 
-type ConsensusBlocksGetSiafundOutput struct { - ID types.SiafundOutputID `json:"id"` - Value uint64 `json:"value"` - Address types.Address `json:"unlockhash"` -} - -// RegisterRoutesConsensus is a helper function to register all consensus routes. -func RegisterRoutesConsensus(router *httprouter.Router, cs modules.ConsensusSet) { - router.GET("/consensus", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - consensusHandler(cs, w, req, ps) - }) - router.GET("/consensus/blocks", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - consensusBlocksHandler(cs, w, req, ps) - }) - router.POST("/consensus/validate/transactionset", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - consensusValidateTransactionsetHandler(cs, w, req, ps) - }) -} - -// consensusBlocksGetFromBlock is a helper method that uses a types.Block, types.BlockHeight and -// types.Currency to create a ConsensusBlocksGet object. -func consensusBlocksGetFromBlock(b types.Block, h uint64, d types.Currency) ConsensusBlocksGet { - txns := make([]ConsensusBlocksGetTxn, 0, len(b.Transactions)) - for _, t := range b.Transactions { - // Get the transaction's SiacoinOutputs. - scos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(t.SiacoinOutputs)) - for i, sco := range t.SiacoinOutputs { - scos = append(scos, ConsensusBlocksGetSiacoinOutput{ - ID: t.SiacoinOutputID(i), - Value: sco.Value, - Address: sco.Address, - }) - } - // Get the transaction's SiafundOutputs. - sfos := make([]ConsensusBlocksGetSiafundOutput, 0, len(t.SiafundOutputs)) - for i, sfo := range t.SiafundOutputs { - sfos = append(sfos, ConsensusBlocksGetSiafundOutput{ - ID: t.SiafundOutputID(i), - Value: sfo.Value, - Address: sfo.Address, - }) - } - // Get the transaction's FileContracts. - fcos := make([]ConsensusBlocksGetFileContract, 0, len(t.FileContracts)) - for i, fc := range t.FileContracts { - // Get the FileContract's valid proof outputs. 
- fcid := t.FileContractID(i) - vpos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(fc.ValidProofOutputs)) - for j, vpo := range fc.ValidProofOutputs { - vpos = append(vpos, ConsensusBlocksGetSiacoinOutput{ - ID: fcid.ValidOutputID(j), - Value: vpo.Value, - Address: vpo.Address, - }) - } - // Get the FileContract's missed proof outputs. - mpos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(fc.MissedProofOutputs)) - for j, mpo := range fc.MissedProofOutputs { - mpos = append(mpos, ConsensusBlocksGetSiacoinOutput{ - ID: fcid.MissedOutputID(j), - Value: mpo.Value, - Address: mpo.Address, - }) - } - fcos = append(fcos, ConsensusBlocksGetFileContract{ - ID: fcid, - Filesize: fc.Filesize, - FileMerkleRoot: fc.FileMerkleRoot, - WindowStart: fc.WindowStart, - WindowEnd: fc.WindowEnd, - Payout: fc.Payout, - ValidProofOutputs: vpos, - MissedProofOutputs: mpos, - UnlockHash: fc.UnlockHash, - RevisionNumber: fc.RevisionNumber, - }) - } - txns = append(txns, ConsensusBlocksGetTxn{ - ID: t.ID(), - SiacoinInputs: t.SiacoinInputs, - SiacoinOutputs: scos, - FileContracts: fcos, - FileContractRevisions: t.FileContractRevisions, - StorageProofs: t.StorageProofs, - SiafundInputs: t.SiafundInputs, - SiafundOutputs: sfos, - MinerFees: t.MinerFees, - ArbitraryData: t.ArbitraryData, - TransactionSignatures: t.Signatures, - }) - } - return ConsensusBlocksGet{ - ID: b.ID(), - Height: h, - ParentID: b.ParentID, - Nonce: b.Nonce, - Difficulty: d, - Timestamp: b.Timestamp, - MinerPayouts: b.MinerPayouts, - Transactions: txns, - } -} - -// consensusHandler handles the API calls to /consensus. 
-func consensusHandler(cs modules.ConsensusSet, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - height := cs.Height() - b, found := cs.BlockAtHeight(height) - if !found { - err := "Failed to fetch block for current height" - WriteError(w, Error{err}, http.StatusInternalServerError) - return - } - cbid := b.ID() - currentTarget, _ := cs.ChildTarget(cbid) - WriteJSON(w, ConsensusGET{ - Synced: cs.Synced(), - Height: height, - CurrentBlock: cbid, - Target: currentTarget, - Difficulty: currentTarget.Difficulty(), - }) -} - -// consensusBlocksHandler handles the API calls to /consensus/blocks -// endpoint. -func consensusBlocksHandler(cs modules.ConsensusSet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Get query params and check them. - id, height := req.FormValue("id"), req.FormValue("height") - if id != "" && height != "" { - WriteError(w, Error{"can't specify both id and height"}, http.StatusBadRequest) - return - } - if id == "" && height == "" { - WriteError(w, Error{"either id or height has to be provided"}, http.StatusBadRequest) - return - } - - var b types.Block - var h uint64 - var exists bool - - // Handle request by id. - if id != "" { - var bid types.BlockID - if err := bid.UnmarshalText([]byte(id)); err != nil { - WriteError(w, Error{"failed to unmarshal blockid"}, http.StatusBadRequest) - return - } - b, h, exists = cs.BlockByID(bid) - } - // Handle request by height. - if height != "" { - if _, err := fmt.Sscan(height, &h); err != nil { - WriteError(w, Error{"failed to parse block height"}, http.StatusBadRequest) - return - } - b, exists = cs.BlockAtHeight(h) - } - // Check if block was found. - if !exists { - WriteError(w, Error{"block doesn't exist"}, http.StatusBadRequest) - return - } - - target, _ := cs.ChildTarget(b.ID()) - d := target.Difficulty() - - // Write response. 
- WriteJSON(w, consensusBlocksGetFromBlock(b, h, d)) -} - -// consensusValidateTransactionsetHandler handles the API calls to -// /consensus/validate/transactionset. -func consensusValidateTransactionsetHandler(cs modules.ConsensusSet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var txnset []types.Transaction - err := json.NewDecoder(req.Body).Decode(&txnset) - if err != nil { - WriteError(w, Error{"could not decode transaction set: " + err.Error()}, http.StatusBadRequest) - return - } - _, err = cs.TryTransactionSet(txnset) - if err != nil { - WriteError(w, Error{"transaction set validation failed: " + err.Error()}, http.StatusBadRequest) - return - } - WriteSuccess(w) -} diff --git a/node/api/daemon.go b/node/api/daemon.go deleted file mode 100644 index a665729..0000000 --- a/node/api/daemon.go +++ /dev/null @@ -1,80 +0,0 @@ -package api - -import ( - "log" - "net/http" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/internal/build" - "github.com/mike76-dev/sia-satellite/modules" -) - -type ( - // DaemonAlertsGet contains information about currently registered alerts - // across all loaded modules. - DaemonAlertsGet struct { - Alerts []modules.Alert `json:"alerts"` - CriticalAlerts []modules.Alert `json:"criticalalerts"` - ErrorAlerts []modules.Alert `json:"erroralerts"` - WarningAlerts []modules.Alert `json:"warningalerts"` - InfoAlerts []modules.Alert `json:"infoalerts"` - } - - // DaemonVersionGet contains information about the running daemon's version. - DaemonVersionGet struct { - Version string - GitRevision string - BuildTime string - } - - // DaemonVersion holds the version information for satd. - DaemonVersion struct { - Version string `json:"version"` - GitRevision string `json:"gitrevision"` - BuildTime string `json:"buildtime"` - } -) - -// daemonAlertsHandlerGET handles the API call that returns the alerts of all -// loaded modules. 
-func (api *API) daemonAlertsHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - // initialize slices to avoid "null" in response. - crit := make([]modules.Alert, 0, 6) - err := make([]modules.Alert, 0, 6) - warn := make([]modules.Alert, 0, 6) - info := make([]modules.Alert, 0, 6) - if api.gateway != nil { - c, e, w, i := api.gateway.Alerts() - crit = append(crit, c...) - err = append(err, e...) - warn = append(warn, w...) - info = append(info, i...) - } - // Sort alerts by severity. Critical first, then Error and finally Warning. - alerts := append(append(crit, append(err, warn...)...), info...) - WriteJSON(w, DaemonAlertsGet{ - Alerts: alerts, - CriticalAlerts: crit, - ErrorAlerts: err, - WarningAlerts: warn, - InfoAlerts: info, - }) -} - -// daemonVersionHandler handles the API call that requests the daemon's version. -func (api *API) daemonVersionHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - WriteJSON(w, DaemonVersion{Version: build.NodeVersion, GitRevision: build.GitRevision, BuildTime: build.BuildTime}) -} - -// daemonStopHandler handles the API call to stop the daemon cleanly. -func (api *API) daemonStopHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - // can't write after we stop the server, so lie a bit. - WriteSuccess(w) - - // Shutdown in a separate goroutine to prevent a deadlock. - go func() { - if err := api.Shutdown(); err != nil { - log.Fatal(err) - } - }() -} diff --git a/node/api/gateway.go b/node/api/gateway.go deleted file mode 100644 index d7ad2f5..0000000 --- a/node/api/gateway.go +++ /dev/null @@ -1,150 +0,0 @@ -package api - -import ( - "encoding/json" - "net/http" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/modules" -) - -type ( - // GatewayGET contains the fields returned by a GET call to "/gateway". 
- GatewayGET struct { - NetAddress modules.NetAddress `json:"netaddress"` - Peers []modules.Peer `json:"peers"` - Online bool `json:"online"` - } - - // GatewayBlocklistPOST contains the information needed to set the Blocklist. - // of the gateway - GatewayBlocklistPOST struct { - Action string `json:"action"` - Addresses []string `json:"addresses"` - } - - // GatewayBlocklistGET contains the Blocklist of the gateway. - GatewayBlocklistGET struct { - Blocklist []string `json:"blocklist"` - } -) - -// RegisterRoutesGateway is a helper function to register all gateway routes. -func RegisterRoutesGateway(router *httprouter.Router, g modules.Gateway, requiredPassword string) { - router.GET("/gateway", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - gatewayHandler(g, w, req, ps) - }) - router.POST("/gateway/connect/:netaddress", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - gatewayConnectHandler(g, w, req, ps) - }, requiredPassword)) - router.POST("/gateway/disconnect/:netaddress", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - gatewayDisconnectHandler(g, w, req, ps) - }, requiredPassword)) - router.GET("/gateway/blocklist", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - gatewayBlocklistHandlerGET(g, w, req, ps) - }) - router.POST("/gateway/blocklist", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - gatewayBlocklistHandlerPOST(g, w, req, ps) - }, requiredPassword)) -} - -// gatewayHandler handles the API call asking for the gateway status. -func gatewayHandler(gateway modules.Gateway, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - peers := gateway.Peers() - // nil slices are marshalled as 'null' in JSON, whereas 0-length slices are - // marshalled as '[]'. The latter is preferred, indicating that the value - // exists but contains no elements. 
- if peers == nil { - peers = make([]modules.Peer, 0) - } - WriteJSON(w, GatewayGET{gateway.Address(), peers, gateway.Online()}) -} - -// gatewayConnectHandler handles the API call to add a peer to the gateway. -func gatewayConnectHandler(gateway modules.Gateway, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - addr := modules.NetAddress(ps.ByName("netaddress")) - err := gateway.ConnectManual(addr) - if err != nil { - WriteError(w, Error{err.Error()}, http.StatusBadRequest) - return - } - - WriteSuccess(w) -} - -// gatewayDisconnectHandler handles the API call to remove a peer from the gateway. -func gatewayDisconnectHandler(gateway modules.Gateway, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - addr := modules.NetAddress(ps.ByName("netaddress")) - err := gateway.DisconnectManual(addr) - if err != nil { - WriteError(w, Error{err.Error()}, http.StatusBadRequest) - return - } - - WriteSuccess(w) -} - -// gatewayBlocklistHandlerGET handles the API call to get the gateway's -// blocklist. -func gatewayBlocklistHandlerGET(gateway modules.Gateway, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - // Get Blocklist. - blocklist, err := gateway.Blocklist() - if err != nil { - WriteError(w, Error{"unable to get blocklist mode: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, GatewayBlocklistGET{ - Blocklist: blocklist, - }) -} - -// gatewayBlocklistHandlerPOST handles the API call to modify the gateway's -// blocklist. -// -// Addresses will be passed in as an array of strings, comma separated net -// addresses. -func gatewayBlocklistHandlerPOST(gateway modules.Gateway, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Parse parameters. 
- var params GatewayBlocklistPOST - err := json.NewDecoder(req.Body).Decode(¶ms) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - - switch params.Action { - case "append": - // Check that addresses where submitted. - if len(params.Addresses) == 0 { - WriteError(w, Error{"no addresses submitted to append or remove"}, http.StatusBadRequest) - return - } - // Add addresses to Blocklist. - if err := gateway.AddToBlocklist(params.Addresses); err != nil { - WriteError(w, Error{"failed to add addresses to the blocklist: " + err.Error()}, http.StatusBadRequest) - return - } - case "remove": - // Check that addresses where submitted. - if len(params.Addresses) == 0 { - WriteError(w, Error{"no addresses submitted to append or remove"}, http.StatusBadRequest) - return - } - // Remove addresses from the Blocklist. - if err := gateway.RemoveFromBlocklist(params.Addresses); err != nil { - WriteError(w, Error{"failed to remove addresses from the blocklist: " + err.Error()}, http.StatusBadRequest) - return - } - case "set": - // Set Blocklist. - if err := gateway.SetBlocklist(params.Addresses); err != nil { - WriteError(w, Error{"failed to set the blocklist: " + err.Error()}, http.StatusBadRequest) - return - } - default: - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - - WriteSuccess(w) -} diff --git a/node/api/hostdb.go b/node/api/hostdb.go deleted file mode 100644 index fc1e6e8..0000000 --- a/node/api/hostdb.go +++ /dev/null @@ -1,216 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -type ( - // ExtendedHostDBEntry is an extension to modules.HostDBEntry that includes - // the string representation of the public key. 
- ExtendedHostDBEntry struct { - modules.HostDBEntry - PublicKeyString string `json:"publickeystring"` - } - - // HostdbActiveGET lists active hosts on the network. - HostdbActiveGET struct { - Hosts []ExtendedHostDBEntry `json:"hosts"` - } - - // HostdbAllGET lists all hosts that the manager is aware of. - HostdbAllGET struct { - Hosts []ExtendedHostDBEntry `json:"hosts"` - } - - // HostdbHostsGET lists detailed statistics for a particular host, selected - // by pubkey. - HostdbHostsGET struct { - Entry ExtendedHostDBEntry `json:"entry"` - ScoreBreakdown modules.HostScoreBreakdown `json:"scorebreakdown"` - } - - // HostdbGet holds information about the hostdb. - HostdbGet struct { - BlockHeight uint64 `json:"blockheight"` - InitialScanComplete bool `json:"initialscancomplete"` - } - - // HostdbFilterModeGET contains the information about the HostDB's - // filtermode. - HostdbFilterModeGET struct { - FilterMode string `json:"filtermode"` - Hosts []string `json:"hosts"` - NetAddresses []string `json:"netaddresses"` - } - - // HostdbFilterModePOST contains the information needed to set the the - // FilterMode of the hostDB. - HostdbFilterModePOST struct { - FilterMode string `json:"filtermode"` - Hosts []types.PublicKey `json:"hosts"` - NetAddresses []string `json:"netaddresses"` - } -) - -// hostdbHandler handles the API call asking for the status of HostDB. -func (api *API) hostdbHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - isc, bh, err := api.manager.InitialScanComplete() - if err != nil { - WriteError(w, Error{"Failed to get initial scan status: " + err.Error()}, http.StatusInternalServerError) - return - } - WriteJSON(w, HostdbGet{ - BlockHeight: bh, - InitialScanComplete: isc, - }) -} - -// hostdbActiveHandler handles the API call asking for the list of active -// hosts. 
-func (api *API) hostdbActiveHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var numHosts uint64 - hosts, err := api.manager.ActiveHosts() - if err != nil { - WriteError(w, Error{"unable to get active hosts: " + err.Error()}, http.StatusBadRequest) - return - } - - if req.FormValue("numhosts") == "" { - // Default value for 'numhosts' is all of them. - numHosts = uint64(len(hosts)) - } else { - // Parse the value for 'numhosts'. - _, err := fmt.Sscan(req.FormValue("numhosts"), &numHosts) - if err != nil { - WriteError(w, Error{"unable to parse numhosts: " + err.Error()}, http.StatusBadRequest) - return - } - - // Catch any boundary errors. - if numHosts > uint64(len(hosts)) { - numHosts = uint64(len(hosts)) - } - } - - // Convert the entries into extended entries. - var extendedHosts []ExtendedHostDBEntry - for _, host := range hosts { - extendedHosts = append(extendedHosts, ExtendedHostDBEntry{ - HostDBEntry: host, - PublicKeyString: host.PublicKey.String(), - }) - } - - WriteJSON(w, HostdbActiveGET{ - Hosts: extendedHosts[:numHosts], - }) -} - -// hostdbAllHandler handles the API call asking for the list of all hosts. -func (api *API) hostdbAllHandler(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - // Get the set of all hosts and convert them into extended hosts. - hosts, err := api.manager.AllHosts() - if err != nil { - WriteError(w, Error{"unable to get all hosts: " + err.Error()}, http.StatusBadRequest) - return - } - var extendedHosts []ExtendedHostDBEntry - for _, host := range hosts { - extendedHosts = append(extendedHosts, ExtendedHostDBEntry{ - HostDBEntry: host, - PublicKeyString: host.PublicKey.String(), - }) - } - - WriteJSON(w, HostdbAllGET{ - Hosts: extendedHosts, - }) -} - -// hostdbHostsHandler handles the API call asking for a specific host, -// returning detailed information about that host. 
-func (api *API) hostdbHostsHandler(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - var pk types.PublicKey - if err := pk.UnmarshalText([]byte(ps.ByName("pubkey"))); err != nil { - WriteError(w, Error{"unable to unmarshal public key: " + err.Error()}, http.StatusBadRequest) - return - } - - entry, exists, err := api.manager.Host(pk) - if err != nil { - WriteError(w, Error{"unable to get host: " + err.Error()}, http.StatusBadRequest) - return - } - if !exists { - WriteError(w, Error{"requested host does not exist"}, http.StatusBadRequest) - return - } - breakdown, err := api.manager.ScoreBreakdown(entry) - if err != nil { - WriteError(w, Error{"error calculating score breakdown: " + err.Error()}, http.StatusInternalServerError) - return - } - - // Extend the hostdb entry to have the public key string. - extendedEntry := ExtendedHostDBEntry{ - HostDBEntry: entry, - PublicKeyString: entry.PublicKey.String(), - } - WriteJSON(w, HostdbHostsGET{ - Entry: extendedEntry, - ScoreBreakdown: breakdown, - }) -} - -// hostdbFilterModeHandlerGET handles the API call to get the hostdb's filter -// mode. -func (api *API) hostdbFilterModeHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - // Get FilterMode. - fm, hostMap, netAddresses, err := api.manager.Filter() - if err != nil { - WriteError(w, Error{"unable to get filter mode: " + err.Error()}, http.StatusBadRequest) - return - } - // Build Slice of PubKeys. - var hosts []string - for key := range hostMap { - hosts = append(hosts, key) - } - WriteJSON(w, HostdbFilterModeGET{ - FilterMode: fm.String(), - Hosts: hosts, - NetAddresses: netAddresses, - }) -} - -// hostdbFilterModeHandlerPOST handles the API call to set the hostdb's filter -// mode. -func (api *API) hostdbFilterModeHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Parse parameters. 
- var params HostdbFilterModePOST - err := json.NewDecoder(req.Body).Decode(¶ms) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - - var fm modules.FilterMode - if err = fm.FromString(params.FilterMode); err != nil { - WriteError(w, Error{"unable to load filter mode from string: " + err.Error()}, http.StatusBadRequest) - return - } - - // Set list mode. - if err := api.manager.SetFilterMode(fm, params.Hosts, params.NetAddresses); err != nil { - WriteError(w, Error{"failed to set the list mode: " + err.Error()}, http.StatusBadRequest) - return - } - WriteSuccess(w) -} diff --git a/node/api/manager.go b/node/api/manager.go deleted file mode 100644 index 4d05f8d..0000000 --- a/node/api/manager.go +++ /dev/null @@ -1,437 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -type ( - // ExchangeRate contains the exchange rate of a given currency. - ExchangeRate struct { - Currency string `json:"currency"` - Rate float64 `json:"rate"` - } - - // HostAverages contains the host network averages. - HostAverages struct { - modules.HostAverages - Rate float64 - } - - // Renter contains information about the renter. - Renter struct { - Email string `json:"email"` - PublicKey types.PublicKey `json:"publickey"` - } - - // RentersGET contains the list of the renters. - RentersGET struct { - Renters []Renter `json:"renters"` - } - - // RenterContract represents a contract formed by the renter. - RenterContract struct { - // Amount of contract funds that have been spent on downloads. - DownloadSpending types.Currency `json:"downloadspending"` - // Block height that the file contract ends on. - EndHeight uint64 `json:"endheight"` - // Fees paid in order to form the file contract. 
- Fees types.Currency `json:"fees"` - // Amount of contract funds that have been spent on funding an ephemeral - // account on the host. - FundAccountSpending types.Currency `json:"fundaccountspending"` - // Public key of the renter that formed the contract. - RenterPublicKey types.PublicKey `json:"renterpublickey"` - // Public key of the host the contract was formed with. - HostPublicKey types.PublicKey `json:"hostpublickey"` - // HostVersion is the version of Sia that the host is running. - HostVersion string `json:"hostversion"` - // ID of the file contract. - ID types.FileContractID `json:"id"` - // A signed transaction containing the most recent contract revision. - LastTransaction types.Transaction `json:"lasttransaction"` - // Amount of contract funds that have been spent on maintenance tasks - // such as updating the price table or syncing the ephemeral account - // balance. - MaintenanceSpending modules.MaintenanceSpending `json:"maintenancespending"` - // Address of the host the file contract was formed with. - NetAddress string `json:"netaddress"` - // Remaining funds left to spend on uploads & downloads. - RenterFunds types.Currency `json:"renterfunds"` - // Size of the file contract, which is typically equal to the number of - // bytes that have been uploaded to the host. - Size uint64 `json:"size"` - // Block height that the file contract began on. - StartHeight uint64 `json:"startheight"` - // Amount of contract funds that have been spent on storage. - StorageSpending types.Currency `json:"storagespending"` - // Total cost to the wallet of forming the file contract. - TotalCost types.Currency `json:"totalcost"` - // Amount of contract funds that have been spent on uploads. - UploadSpending types.Currency `json:"uploadspending"` - // Signals if contract is good for uploading data. - GoodForUpload bool `json:"goodforupload"` - // Signals if contract is good for a renewal. 
- GoodForRenew bool `json:"goodforrenew"` - // Signals if a contract has been marked as bad. - BadContract bool `json:"badcontract"` - } - - // RenterContracts contains the renter's contracts. - RenterContracts struct { - ActiveContracts []RenterContract `json:"activecontracts"` - PassiveContracts []RenterContract `json:"passivecontracts"` - RefreshedContracts []RenterContract `json:"refreshedcontracts"` - DisabledContracts []RenterContract `json:"disabledcontracts"` - ExpiredContracts []RenterContract `json:"expiredcontracts"` - ExpiredRefreshedContracts []RenterContract `json:"expiredrefreshedcontracts"` - } - - // EmailPreferences contains the email preferences. - EmailPreferences struct { - Email string `json:"email"` - WarnThreshold types.Currency `json:"threshold"` - } -) - -// managerAveragesHandlerGET handles the API call to /manager/averages. -func (api *API) managerAveragesHandlerGET(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - currency := strings.ToUpper(ps.ByName("currency")) - if currency == "" { - currency = "SC" - } - - rate := float64(1.0) - var err error - if currency != "SC" { - rate, err = api.manager.GetSiacoinRate(currency) - if err != nil { - WriteError(w, Error{fmt.Sprintf("couldn't get exchange rate: %v", err)}, http.StatusInternalServerError) - return - } - } - - ha := HostAverages{api.manager.GetAverages(), rate} - - WriteJSON(w, ha) -} - -// managerRentersHandlerGET handles the API call to /manager/renters. -func (api *API) managerRentersHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - renters := api.manager.Renters() - - r := RentersGET{ - Renters: make([]Renter, 0, len(renters)), - } - - for _, renter := range renters { - r.Renters = append(r.Renters, Renter{ - Email: renter.Email, - PublicKey: renter.PublicKey, - }) - } - - WriteJSON(w, r) -} - -// managerRenterHandlerGET handles the API call to /manager/renter. 
-func (api *API) managerRenterHandlerGET(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - pk := ps.ByName("publickey") - if pk == "" { - WriteError(w, Error{"public key not specified"}, http.StatusBadRequest) - return - } - - var key types.PublicKey - err := key.UnmarshalText([]byte(pk)) - if err != nil { - WriteError(w, Error{"couldn't unmarshal public key: " + err.Error()}, http.StatusBadRequest) - return - } - - renter, err := api.manager.GetRenter(key) - if err != nil { - WriteError(w, Error{"renter not found: " + err.Error()}, http.StatusBadRequest) - return - } - - WriteJSON(w, renter) -} - -// managerBalanceHandlerGET handles the API call to /manager/balance. -func (api *API) managerBalanceHandlerGET(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - pk := ps.ByName("publickey") - if pk == "" { - WriteError(w, Error{"public key not specified"}, http.StatusBadRequest) - return - } - - var key types.PublicKey - err := key.UnmarshalText([]byte(pk)) - if err != nil { - WriteError(w, Error{"couldn't unmarshal public key: " + err.Error()}, http.StatusBadRequest) - return - } - - renter, err := api.manager.GetRenter(key) - if err != nil { - WriteError(w, Error{"renter not found: " + err.Error()}, http.StatusBadRequest) - return - } - - ub, err := api.manager.GetBalance(renter.Email) - if err != nil { - WriteError(w, Error{"unable to get balance: " + err.Error()}, http.StatusInternalServerError) - return - } - - WriteJSON(w, ub) -} - -// managerContractsHandlerGET handles the API call to /manager/contracts. -// -// Active contracts are contracts that are actively being used to store data -// and can upload, download, and renew. These contracts are GoodForUpload -// and GoodForRenew. -// -// Refreshed contracts are contracts that are in the current period and were -// refreshed due to running out of funds. A new contract that replaced a -// refreshed contract can either be in Active or Disabled contracts. 
These -// contracts are broken out as to not double count the data recorded in the -// contract. -// -// Disabled Contracts are contracts that are no longer active as there are Not -// GoodForUpload and Not GoodForRenew but still have endheights in the current -// period. -// -// Expired contracts are contracts who's endheights are in the past. -// -// ExpiredRefreshed contracts are refreshed contracts who's endheights are in -// the past. -func (api *API) managerContractsHandlerGET(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - pk := strings.ToLower(ps.ByName("publickey")) - var rc RenterContracts - currentBlockHeight := api.cs.Height() - - // Fetch the contracts. - var renter modules.Renter - var contracts, oldContracts []modules.RenterContract - if pk != "" { - var key types.PublicKey - err := key.UnmarshalText([]byte(pk)) - if err != nil { - WriteError(w, Error{"couldn't unmarshal public key: " + err.Error()}, http.StatusBadRequest) - return - } - contracts = api.manager.ContractsByRenter(key) - oldContracts = api.manager.OldContractsByRenter(key) - renter, _ = api.manager.GetRenter(key) - } else { - contracts = api.manager.Contracts() - oldContracts = api.manager.OldContracts() - } - - for _, c := range contracts { - // Fetch host address. - var netAddress string - hdbe, exists, _ := api.manager.Host(c.HostPublicKey) - if exists { - netAddress = hdbe.Settings.NetAddress - } - - // Build the contract. 
- contract := RenterContract{ - BadContract: c.Utility.BadContract, - DownloadSpending: c.DownloadSpending, - EndHeight: c.EndHeight, - Fees: c.TxnFee.Add(c.SiafundFee).Add(c.ContractFee), - FundAccountSpending: c.FundAccountSpending, - GoodForUpload: c.Utility.GoodForUpload, - GoodForRenew: c.Utility.GoodForRenew, - RenterPublicKey: c.RenterPublicKey, - HostPublicKey: c.HostPublicKey, - HostVersion: hdbe.Settings.Version, - ID: c.ID, - LastTransaction: c.Transaction, - NetAddress: netAddress, - MaintenanceSpending: c.MaintenanceSpending, - RenterFunds: c.RenterFunds, - Size: c.Size(), - StartHeight: c.StartHeight, - StorageSpending: c.StorageSpending, - TotalCost: c.TotalCost, - UploadSpending: c.UploadSpending, - } - - // Determine contract status. - refreshed := api.manager.RefreshedContract(c.ID) - active := c.Utility.GoodForUpload && c.Utility.GoodForRenew && !refreshed - passive := !c.Utility.GoodForUpload && c.Utility.GoodForRenew && !refreshed - disabledContract := !active && !passive && !refreshed - - // A contract can either be active, passive, refreshed, or disabled. - statusErr := active && passive && refreshed || active && refreshed || active && passive || passive && refreshed - if statusErr { - fmt.Println("CRITICAL: Contract has multiple status types, this should never happen") - } else if active { - rc.ActiveContracts = append(rc.ActiveContracts, contract) - } else if passive { - rc.PassiveContracts = append(rc.PassiveContracts, contract) - } else if refreshed { - rc.RefreshedContracts = append(rc.RefreshedContracts, contract) - } else if disabledContract { - rc.DisabledContracts = append(rc.DisabledContracts, contract) - } - } - - // Process old contracts. - for _, c := range oldContracts { - var size uint64 - if len(c.Transaction.FileContractRevisions) != 0 { - size = c.Transaction.FileContractRevisions[0].Filesize - } - - // Fetch host address. 
- var netAddress string - hdbe, exists, _ := api.manager.Host(c.HostPublicKey) - if exists { - netAddress = hdbe.Settings.NetAddress - } - - // Build the contract. - contract := RenterContract{ - BadContract: c.Utility.BadContract, - DownloadSpending: c.DownloadSpending, - EndHeight: c.EndHeight, - Fees: c.TxnFee.Add(c.SiafundFee).Add(c.ContractFee), - FundAccountSpending: c.FundAccountSpending, - GoodForUpload: c.Utility.GoodForUpload, - GoodForRenew: c.Utility.GoodForRenew, - RenterPublicKey: c.RenterPublicKey, - HostPublicKey: c.HostPublicKey, - HostVersion: hdbe.Settings.Version, - ID: c.ID, - LastTransaction: c.Transaction, - MaintenanceSpending: c.MaintenanceSpending, - NetAddress: netAddress, - RenterFunds: c.RenterFunds, - Size: size, - StartHeight: c.StartHeight, - StorageSpending: c.StorageSpending, - TotalCost: c.TotalCost, - UploadSpending: c.UploadSpending, - } - - // Determine contract status. - refreshed := api.manager.RefreshedContract(c.ID) - endHeightInPast := c.EndHeight < uint64(currentBlockHeight) - if pk != "" { - endHeightInPast = endHeightInPast || c.StartHeight < renter.CurrentPeriod - } - expiredContract := endHeightInPast && !refreshed - expiredRefreshed := endHeightInPast && refreshed - refreshedContract := refreshed && !endHeightInPast - disabledContract := !refreshed && !endHeightInPast - - // A contract can only be refreshed, disabled, expired, or expired refreshed. - if expiredContract { - rc.ExpiredContracts = append(rc.ExpiredContracts, contract) - } else if expiredRefreshed { - rc.ExpiredRefreshedContracts = append(rc.ExpiredRefreshedContracts, contract) - } else if refreshedContract { - rc.RefreshedContracts = append(rc.RefreshedContracts, contract) - } else if disabledContract { - rc.DisabledContracts = append(rc.DisabledContracts, contract) - } - } - - WriteJSON(w, rc) -} - -// managerPreferencesHandlerGET handles the API call to /manager/preferences. 
-func (api *API) managerPreferencesHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - email, threshold := api.manager.GetEmailPreferences() - ep := EmailPreferences{ - Email: email, - WarnThreshold: threshold, - } - - WriteJSON(w, ep) -} - -// managerPreferencesHandlerPOST handles the API call to /manager/preferences. -func (api *API) managerPreferencesHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Parse parameters. - var ep EmailPreferences - err := json.NewDecoder(req.Body).Decode(&ep) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - - // Set the preferences. - if err := api.manager.SetEmailPreferences(ep.Email, ep.WarnThreshold); err != nil { - WriteError(w, Error{"failed to change the preferences: " + err.Error()}, http.StatusInternalServerError) - return - } - - WriteSuccess(w) -} - -// managerPricesHandlerGET handles the API call to /manager/prices. -func (api *API) managerPricesHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - WriteJSON(w, modules.StaticPricing) -} - -// managerPricesHandlerPOST handles the API call to /manager/prices. -func (api *API) managerPricesHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Parse parameters. - var prices modules.Pricing - err := json.NewDecoder(req.Body).Decode(&prices) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - - // Set the prices. - if err := api.manager.UpdatePrices(prices); err != nil { - WriteError(w, Error{"failed to change the prices: " + err.Error()}, http.StatusInternalServerError) - return - } - - WriteSuccess(w) -} - -// managerMaintenanceHandlerPOST handles the API call to /manager/maintenance. 
-func (api *API) managerMaintenanceHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var start struct { - Start bool `json:"start"` - } - err := json.NewDecoder(req.Body).Decode(&start) - if err != nil { - WriteError(w, Error{"invalid parameter: " + err.Error()}, http.StatusBadRequest) - return - } - - err = api.manager.StartMaintenance(start.Start) - if err != nil { - WriteError(w, Error{"couldn't set maintenance flag: " + err.Error()}, http.StatusInternalServerError) - return - } - - WriteSuccess(w) -} - -// managerMaintenanceHandlerGET handles the API call to /manager/maintenance. -func (api *API) managerMaintenanceHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - WriteJSON(w, struct { - Maintenance bool `json:"maintenance"` - }{Maintenance: api.manager.Maintenance()}) -} diff --git a/node/api/portal.go b/node/api/portal.go deleted file mode 100644 index 0c34542..0000000 --- a/node/api/portal.go +++ /dev/null @@ -1,70 +0,0 @@ -package api - -import ( - "encoding/json" - "net/http" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/modules" -) - -// Announcement contains the information about a portal announcement. -type Announcement struct { - Text string `json:"text"` - Expires uint64 `json:"expires"` -} - -// portalCreditsHandlerGET handles the API call to /portal/credits. -func (api *API) portalCreditsHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - c := api.portal.GetCredits() - WriteJSON(w, c) -} - -// portalCreditsHandlerPOST handles the API call to /portal/credits. -func (api *API) portalCreditsHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Parse parameters. - var params modules.CreditData - err := json.NewDecoder(req.Body).Decode(¶ms) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - - // Update the credit data. 
- api.portal.SetCredits(params) - - WriteSuccess(w) -} - -// portalAnnouncementHandlerGET handles the API call to /portal/announcement. -func (api *API) portalAnnouncementHandlerGET(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - text, expires, err := api.portal.GetAnnouncement() - if err != nil { - WriteError(w, Error{"internal error: " + err.Error()}, http.StatusInternalServerError) - return - } - WriteJSON(w, Announcement{ - Text: text, - Expires: expires, - }) -} - -// portalAnnouncementHandlerPOST handles the API call to /portal/announcement. -func (api *API) portalAnnouncementHandlerPOST(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Parse parameters. - var params Announcement - err := json.NewDecoder(req.Body).Decode(¶ms) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - - // Set the announcement. - err = api.portal.SetAnnouncement(params.Text, params.Expires) - if err != nil { - WriteError(w, Error{"internal error: " + err.Error()}, http.StatusInternalServerError) - return - } - - WriteSuccess(w) -} diff --git a/node/api/routes.go b/node/api/routes.go deleted file mode 100644 index 5f70ce9..0000000 --- a/node/api/routes.go +++ /dev/null @@ -1,138 +0,0 @@ -package api - -import ( - "context" - "net/http" - "strings" - "time" - - "github.com/julienschmidt/httprouter" -) - -const ( - // httpServerTimeout defines the maximum amount of time before an HTTP call - // will timeout and an error will be returned. - httpServerTimeout = 24 * time.Hour -) - -// buildHttpRoutes sets up and returns an * httprouter.Router. -// it connected the Router to the given api using the required -// parameters: requiredUserAgent and requiredPassword. 
-func (api *API) buildHTTPRoutes() { - router := httprouter.New() - requiredPassword := api.requiredPassword - requiredUserAgent := api.requiredUserAgent - - router.NotFound = http.HandlerFunc(api.UnrecognizedCallHandler) - router.RedirectTrailingSlash = false - - // Daemon API Calls. - router.GET("/daemon/alerts", api.daemonAlertsHandlerGET) - router.GET("/daemon/stop", RequirePassword(api.daemonStopHandler, requiredPassword)) - router.GET("/daemon/version", api.daemonVersionHandler) - - // Consensus API Calls. - if api.cs != nil { - RegisterRoutesConsensus(router, api.cs) - } - - // Gateway API Calls. - if api.gateway != nil { - RegisterRoutesGateway(router, api.gateway, requiredPassword) - } - - // Transaction pool API Calls. - if api.tpool != nil { - RegisterRoutesTransactionPool(router, api.tpool) - } - - // Wallet API Calls. - if api.wallet != nil { - RegisterRoutesWallet(router, api.wallet, requiredPassword) - } - - // HostDB API Calls. - if api.manager != nil { - router.GET("/hostdb", api.hostdbHandler) - router.GET("/hostdb/active", api.hostdbActiveHandler) - router.GET("/hostdb/all", api.hostdbAllHandler) - router.GET("/hostdb/hosts/:pubkey", api.hostdbHostsHandler) - router.GET("/hostdb/filtermode", api.hostdbFilterModeHandlerGET) - router.POST("/hostdb/filtermode", RequirePassword(api.hostdbFilterModeHandlerPOST, requiredPassword)) - } - - // Manager API Calls. 
- if api.manager != nil { - router.GET("/manager/averages/:currency", api.managerAveragesHandlerGET) - router.GET("/manager/renters", RequirePassword(api.managerRentersHandlerGET, requiredPassword)) - router.GET("/manager/renter/:publickey", RequirePassword(api.managerRenterHandlerGET, requiredPassword)) - router.GET("/manager/balance/:publickey", RequirePassword(api.managerBalanceHandlerGET, requiredPassword)) - router.GET("/manager/contracts", RequirePassword(api.managerContractsHandlerGET, requiredPassword)) - router.GET("/manager/contracts/:publickey", RequirePassword(api.managerContractsHandlerGET, requiredPassword)) - router.GET("/manager/preferences", RequirePassword(api.managerPreferencesHandlerGET, requiredPassword)) - router.POST("/manager/preferences", RequirePassword(api.managerPreferencesHandlerPOST, requiredPassword)) - router.GET("/manager/prices", api.managerPricesHandlerGET) - router.POST("/manager/prices", RequirePassword(api.managerPricesHandlerPOST, requiredPassword)) - router.GET("/manager/maintenance", api.managerMaintenanceHandlerGET) - router.POST("/manager/maintenance", RequirePassword(api.managerMaintenanceHandlerPOST, requiredPassword)) - } - - // Portal API Calls. - if api.portal != nil { - router.GET("/portal/credits", RequirePassword(api.portalCreditsHandlerGET, requiredPassword)) - router.POST("/portal/credits", RequirePassword(api.portalCreditsHandlerPOST, requiredPassword)) - router.GET("/portal/announcement", api.portalAnnouncementHandlerGET) - router.POST("/portal/announcement", RequirePassword(api.portalAnnouncementHandlerPOST, requiredPassword)) - } - - // Apply UserAgent middleware and return the Router. - api.routerMu.Lock() - api.router = timeoutHandler(RequireUserAgent(router, requiredUserAgent), httpServerTimeout) - api.routerMu.Unlock() - return -} - -// timeoutHandler is a middleware that enforces a specific timeout on the route -// by closing the context after the httpServerTimeout. 
-func timeoutHandler(h http.Handler, timeout time.Duration) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - // Create a new context with timeout. - ctx, cancel := context.WithTimeout(req.Context(), httpServerTimeout) - defer cancel() - - // Add the new context to the request and call the handler. - h.ServeHTTP(w, req.WithContext(ctx)) - }) -} - -// RequireUserAgent is middleware that requires all requests to set a -// UserAgent that contains the specified string. -func RequireUserAgent(h http.Handler, ua string) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if !strings.Contains(req.UserAgent(), ua) { - WriteError(w, Error{"Browser access disabled due to security vulnerability."}, - http.StatusBadRequest) - return - } - h.ServeHTTP(w, req) - }) -} - -// RequirePassword is middleware that requires a request to authenticate with a -// password using HTTP basic auth. Usernames are ignored. Empty passwords -// indicate no authentication is required. -func RequirePassword(h httprouter.Handle, password string) httprouter.Handle { - // An empty password is equivalent to no password. - if password == "" { - return h - } - return func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - _, pass, ok := req.BasicAuth() - if !ok || pass != password { - w.Header().Set("WWW-Authenticate", "Basic realm=\"SatAPI\"") - WriteError(w, Error{"API authentication failed."}, http.StatusUnauthorized) - return - } - h(w, req, ps) - } -} diff --git a/node/api/scan.go b/node/api/scan.go deleted file mode 100644 index e4ba8ee..0000000 --- a/node/api/scan.go +++ /dev/null @@ -1,53 +0,0 @@ -package api - -import ( - "math/big" - - "errors" - - "go.sia.tech/core/types" -) - -// scanAmount scans a types.Currency from a string. 
-func scanAmount(amount string) (types.Currency, bool) { - // use SetString manually to ensure that amount does not contain - // multiple values, which would confuse fmt.Scan. - i, ok := new(big.Int).SetString(amount, 10) - if !ok { - return types.ZeroCurrency, false - } else if i.Sign() < 0 { - return types.ZeroCurrency, false - } else if i.BitLen() > 128 { - return types.ZeroCurrency, false - } - return types.NewCurrency(i.Uint64(), new(big.Int).Rsh(i, 64).Uint64()), true -} - -// scanAddress scans a types.UnlockHash from a string. -func scanAddress(addrStr string) (addr types.Address, err error) { - err = addr.UnmarshalText([]byte(addrStr)) - if err != nil { - return types.Address{}, err - } - return addr, nil -} - -// scanHash scans a types.Hash256 from a string. -func scanHash(s string) (h types.Hash256, err error) { - err = h.UnmarshalText([]byte(s)) - if err != nil { - return types.Hash256{}, err - } - return h, nil -} - -// scanBool converts "true" and "false" strings to their respective -// boolean value and returns an error if conversion is not possible. 
-func scanBool(param string) (bool, error) { - if param == "true" { - return true, nil - } else if param == "false" || len(param) == 0 { - return false, nil - } - return false, errors.New("could not decode boolean: value was not true or false") -} diff --git a/node/api/server/core.go b/node/api/server/core.go new file mode 100644 index 0000000..5571322 --- /dev/null +++ b/node/api/server/core.go @@ -0,0 +1,104 @@ +package server + +import ( + "context" + "time" + + "github.com/mike76-dev/sia-satellite/node/api" + "go.sia.tech/core/gateway" + "go.sia.tech/core/types" + "go.sia.tech/jape" +) + +func (s *server) consensusNetworkHandler(jc jape.Context) { + jc.Encode(*s.cm.TipState().Network) +} + +func (s *server) consensusTipHandler(jc jape.Context) { + state := s.cm.TipState() + resp := api.ConsensusTipResponse{ + Height: state.Index.Height, + BlockID: state.Index.ID, + Synced: s.s.Synced() && time.Since(state.PrevTimestamps[0]) < 24*time.Hour, + } + jc.Encode(resp) +} + +func (s *server) consensusTipStateHandler(jc jape.Context) { + jc.Encode(s.cm.TipState()) +} + +func (s *server) syncerPeersHandler(jc jape.Context) { + var sp []api.SyncerPeer + for _, p := range s.s.Peers() { + sp = append(sp, api.SyncerPeer{ + Address: p.Addr(), + Version: p.Version(), + Inbound: p.Inbound, + }) + } + jc.Encode(sp) +} + +func (s *server) syncerConnectHandler(jc jape.Context) { + var addr string + if jc.Decode(&addr) != nil { + return + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _, err := s.s.Connect(ctx, addr) + jc.Check("couldn't connect to peer", err) +} + +func (s *server) syncerBroadcastBlockHandler(jc jape.Context) { + var b types.Block + if jc.Decode(&b) != nil { + return + } else if jc.Check("block is invalid", s.cm.AddBlocks([]types.Block{b})) != nil { + return + } + if b.V2 == nil { + s.s.BroadcastHeader(gateway.BlockHeader{ + ParentID: b.ParentID, + Nonce: b.Nonce, + Timestamp: b.Timestamp, + MerkleRoot: 
b.MerkleRoot(), + }) + } else { + s.s.BroadcastV2BlockOutline(gateway.OutlineBlock(b, s.cm.PoolTransactions(), s.cm.V2PoolTransactions())) + } +} + +func (s *server) txpoolTransactionsHandler(jc jape.Context) { + jc.Encode(api.TxpoolTransactionsResponse{ + Transactions: s.cm.PoolTransactions(), + V2Transactions: s.cm.V2PoolTransactions(), + }) +} + +func (s *server) txpoolFeeHandler(jc jape.Context) { + jc.Encode(s.cm.RecommendedFee()) +} + +func (s *server) txpoolBroadcastHandler(jc jape.Context) { + var tbr api.TxpoolBroadcastRequest + if jc.Decode(&tbr) != nil { + return + } + if len(tbr.Transactions) != 0 { + _, err := s.cm.AddPoolTransactions(tbr.Transactions) + if jc.Check("invalid transaction set", err) != nil { + return + } + s.s.BroadcastTransactionSet(tbr.Transactions) + } + if len(tbr.V2Transactions) != 0 { + index := s.cm.TipState().Index + _, err := s.cm.AddV2PoolTransactions(index, tbr.V2Transactions) + if jc.Check("invalid v2 transaction set", err) != nil { + return + } + s.s.BroadcastV2TransactionSet(index, tbr.V2Transactions) + } +} diff --git a/node/api/server/hostdb.go b/node/api/server/hostdb.go new file mode 100644 index 0000000..e035a72 --- /dev/null +++ b/node/api/server/hostdb.go @@ -0,0 +1,144 @@ +package server + +import ( + "fmt" + "net/http" + + "github.com/mike76-dev/sia-satellite/modules" + "github.com/mike76-dev/sia-satellite/node/api" + "go.sia.tech/core/types" + "go.sia.tech/jape" +) + +func (s *server) hostdbHandler(jc jape.Context) { + isc, bh, err := s.m.InitialScanComplete() + if jc.Check("failed to get initial scan status", err) != nil { + return + } + + jc.Encode(api.HostdbGET{ + BlockHeight: bh, + InitialScanComplete: isc, + }) +} + +func (s *server) hostdbActiveHandler(jc jape.Context) { + var numHosts uint64 + if jc.DecodeForm("numHosts", &numHosts) != nil { + return + } + + hosts, err := s.m.ActiveHosts() + if jc.Check("unable to get active hosts", err) != nil { + return + } + + if numHosts == 0 || numHosts > 
uint64(len(hosts)) { + numHosts = uint64(len(hosts)) + } + + var extendedHosts []api.ExtendedHostDBEntry + for _, host := range hosts { + extendedHosts = append(extendedHosts, api.ExtendedHostDBEntry{ + HostDBEntry: host, + PublicKeyString: host.PublicKey.String(), + }) + } + + jc.Encode(api.HostdbHostsGET{ + Hosts: extendedHosts[:numHosts], + }) +} + +func (s *server) hostdbAllHandler(jc jape.Context) { + var numHosts uint64 + if jc.DecodeForm("numHosts", &numHosts) != nil { + return + } + + hosts, err := s.m.AllHosts() + if jc.Check("unable to get active hosts", err) != nil { + return + } + + if numHosts == 0 || numHosts > uint64(len(hosts)) { + numHosts = uint64(len(hosts)) + } + + var extendedHosts []api.ExtendedHostDBEntry + for _, host := range hosts { + extendedHosts = append(extendedHosts, api.ExtendedHostDBEntry{ + HostDBEntry: host, + PublicKeyString: host.PublicKey.String(), + }) + } + + jc.Encode(api.HostdbHostsGET{ + Hosts: extendedHosts[:numHosts], + }) +} + +func (s *server) hostdbHostHandler(jc jape.Context) { + var pk types.PublicKey + if jc.DecodeParam("publickey", &pk) != nil { + return + } + + entry, exists, err := s.m.Host(pk) + if jc.Check("unable to get host", err) != nil { + return + } + if !exists { + jc.Error(fmt.Errorf("requested host does not exist"), http.StatusBadRequest) + return + } + + breakdown, err := s.m.ScoreBreakdown(entry) + if jc.Check("error calculating score breakdown", err) != nil { + return + } + + // Extend the hostdb entry to have the public key string. 
+ extendedEntry := api.ExtendedHostDBEntry{ + HostDBEntry: entry, + PublicKeyString: entry.PublicKey.String(), + } + jc.Encode(api.HostdbHostGET{ + Entry: extendedEntry, + ScoreBreakdown: breakdown, + }) +} + +func (s *server) hostdbFilterModeHandler(jc jape.Context) { + fm, hostMap, netAddresses, err := s.m.Filter() + if jc.Check("unable to get filter mode", err) != nil { + return + } + + var hosts []string + for key := range hostMap { + hosts = append(hosts, key) + } + + jc.Encode(api.HostdbFilterModeGET{ + FilterMode: fm.String(), + Hosts: hosts, + NetAddresses: netAddresses, + }) +} + +func (s *server) hostdbSetFilterModeHandler(jc jape.Context) { + var params api.HostdbFilterModePOST + if jc.Decode(¶ms) != nil { + return + } + + var fm modules.FilterMode + if jc.Check("unable to load filter mode from string", fm.FromString(params.FilterMode)) != nil { + return + } + + if jc.Check("failed to set the list mode", s.m.SetFilterMode(fm, params.Hosts, params.NetAddresses)) != nil { + return + } +} diff --git a/node/api/server/manager.go b/node/api/server/manager.go new file mode 100644 index 0000000..aa3baa1 --- /dev/null +++ b/node/api/server/manager.go @@ -0,0 +1,292 @@ +package server + +import ( + "fmt" + "strings" + + "github.com/mike76-dev/sia-satellite/modules" + "github.com/mike76-dev/sia-satellite/node/api" + "go.sia.tech/core/types" + "go.sia.tech/jape" +) + +func (s *server) managerAveragesHandler(jc jape.Context) { + var currency string + if jc.DecodeParam("currency", ¤cy) != nil { + return + } + currency = strings.ToUpper(currency) + if currency == "" { + currency = "SC" + } + + rate := float64(1.0) + var err error + if currency != "SC" { + rate, err = s.m.GetSiacoinRate(currency) + if jc.Check("couldn't get exchange rate", err) != nil { + return + } + } + + var ha api.HostAverages + ha.HostAverages = s.m.GetAverages() + ha.Rate = rate + + jc.Encode(ha) +} + +func (s *server) managerRentersHandler(jc jape.Context) { + renters := s.m.Renters() + + r := 
api.RentersGET{ + Renters: make([]api.Renter, 0, len(renters)), + } + + for _, renter := range renters { + r.Renters = append(r.Renters, api.Renter{ + Email: renter.Email, + PublicKey: renter.PublicKey, + }) + } + + jc.Encode(r) +} + +func (s *server) managerRenterHandler(jc jape.Context) { + var key types.PublicKey + if jc.DecodeParam("publickey", &key) != nil { + return + } + + renter, err := s.m.GetRenter(key) + if jc.Check("renter not found", err) != nil { + return + } + + jc.Encode(renter) +} + +func (s *server) managerBalanceHandler(jc jape.Context) { + var key types.PublicKey + if jc.DecodeParam("publickey", &key) != nil { + return + } + + renter, err := s.m.GetRenter(key) + if jc.Check("renter not found", err) != nil { + return + } + + ub, err := s.m.GetBalance(renter.Email) + if jc.Check("unable to get balance", err) != nil { + return + } + + jc.Encode(ub) +} + +func (s *server) managerContractsHandler(jc jape.Context) { + var pk string + if jc.DecodeParam("publickey", &pk) != nil { + return + } + + if pk == "" { + contracts := s.m.Contracts() + oldContracts := s.m.OldContracts() + jc.Encode(s.getContracts(contracts, oldContracts, modules.Renter{})) + return + } + + var key types.PublicKey + if jc.Check("couldn't unmarshal key", key.UnmarshalText([]byte(pk))) != nil { + return + } + + renter, err := s.m.GetRenter(key) + if jc.Check("renter not found", err) != nil { + return + } + + contracts := s.m.ContractsByRenter(key) + oldContracts := s.m.OldContractsByRenter(key) + + jc.Encode(s.getContracts(contracts, oldContracts, renter)) +} + +func (s *server) getContracts(contracts, oldContracts []modules.RenterContract, renter modules.Renter) api.RenterContracts { + var rc api.RenterContracts + currentBlockHeight := s.cm.Tip().Height + + for _, c := range contracts { + // Fetch host address. + var netAddress string + hdbe, exists, _ := s.m.Host(c.HostPublicKey) + if exists { + netAddress = hdbe.Settings.NetAddress + } + + // Build the contract. 
+ contract := api.RenterContract{ + BadContract: c.Utility.BadContract, + DownloadSpending: c.DownloadSpending, + EndHeight: c.EndHeight, + Fees: c.TxnFee.Add(c.SiafundFee).Add(c.ContractFee), + FundAccountSpending: c.FundAccountSpending, + GoodForUpload: c.Utility.GoodForUpload, + GoodForRenew: c.Utility.GoodForRenew, + RenterPublicKey: c.RenterPublicKey, + HostPublicKey: c.HostPublicKey, + HostVersion: hdbe.Settings.Version, + ID: c.ID, + LastTransaction: c.Transaction, + NetAddress: netAddress, + MaintenanceSpending: c.MaintenanceSpending, + RenterFunds: c.RenterFunds, + Size: c.Size(), + StartHeight: c.StartHeight, + StorageSpending: c.StorageSpending, + TotalCost: c.TotalCost, + UploadSpending: c.UploadSpending, + } + + // Determine contract status. + refreshed := s.m.RefreshedContract(c.ID) + active := c.Utility.GoodForUpload && c.Utility.GoodForRenew && !refreshed + passive := !c.Utility.GoodForUpload && c.Utility.GoodForRenew && !refreshed + disabledContract := !active && !passive && !refreshed + + // A contract can either be active, passive, refreshed, or disabled. + statusErr := active && passive && refreshed || active && refreshed || active && passive || passive && refreshed + if statusErr { + fmt.Println("CRITICAL: Contract has multiple status types, this should never happen") + } else if active { + rc.ActiveContracts = append(rc.ActiveContracts, contract) + } else if passive { + rc.PassiveContracts = append(rc.PassiveContracts, contract) + } else if refreshed { + rc.RefreshedContracts = append(rc.RefreshedContracts, contract) + } else if disabledContract { + rc.DisabledContracts = append(rc.DisabledContracts, contract) + } + } + + // Process old contracts. + for _, c := range oldContracts { + var size uint64 + if len(c.Transaction.FileContractRevisions) != 0 { + size = c.Transaction.FileContractRevisions[0].Filesize + } + + // Fetch host address. 
+ var netAddress string + hdbe, exists, _ := s.m.Host(c.HostPublicKey) + if exists { + netAddress = hdbe.Settings.NetAddress + } + + // Build the contract. + contract := api.RenterContract{ + BadContract: c.Utility.BadContract, + DownloadSpending: c.DownloadSpending, + EndHeight: c.EndHeight, + Fees: c.TxnFee.Add(c.SiafundFee).Add(c.ContractFee), + FundAccountSpending: c.FundAccountSpending, + GoodForUpload: c.Utility.GoodForUpload, + GoodForRenew: c.Utility.GoodForRenew, + RenterPublicKey: c.RenterPublicKey, + HostPublicKey: c.HostPublicKey, + HostVersion: hdbe.Settings.Version, + ID: c.ID, + LastTransaction: c.Transaction, + MaintenanceSpending: c.MaintenanceSpending, + NetAddress: netAddress, + RenterFunds: c.RenterFunds, + Size: size, + StartHeight: c.StartHeight, + StorageSpending: c.StorageSpending, + TotalCost: c.TotalCost, + UploadSpending: c.UploadSpending, + } + + // Determine contract status. + refreshed := s.m.RefreshedContract(c.ID) + endHeightInPast := c.EndHeight < uint64(currentBlockHeight) + if renter.Email != "" { + endHeightInPast = endHeightInPast || c.StartHeight < renter.CurrentPeriod + } + expiredContract := endHeightInPast && !refreshed + expiredRefreshed := endHeightInPast && refreshed + refreshedContract := refreshed && !endHeightInPast + disabledContract := !refreshed && !endHeightInPast + + // A contract can only be refreshed, disabled, expired, or expired refreshed. 
+ if expiredContract { + rc.ExpiredContracts = append(rc.ExpiredContracts, contract) + } else if expiredRefreshed { + rc.ExpiredRefreshedContracts = append(rc.ExpiredRefreshedContracts, contract) + } else if refreshedContract { + rc.RefreshedContracts = append(rc.RefreshedContracts, contract) + } else if disabledContract { + rc.DisabledContracts = append(rc.DisabledContracts, contract) + } + } + + return rc +} + +func (s *server) managerPreferencesHandler(jc jape.Context) { + email, threshold := s.m.GetEmailPreferences() + ep := api.EmailPreferences{ + Email: email, + WarnThreshold: threshold, + } + + jc.Encode(ep) +} + +func (s *server) managerUpdatePreferencesHandler(jc jape.Context) { + var ep api.EmailPreferences + if jc.Decode(&ep) != nil { + return + } + + if jc.Check("failed to change preferences", s.m.SetEmailPreferences(ep.Email, ep.WarnThreshold)) != nil { + return + } +} + +func (s *server) managerPricesHandler(jc jape.Context) { + jc.Encode(modules.StaticPricing) +} + +func (s *server) managerUpdatePricesHandler(jc jape.Context) { + var prices modules.Pricing + if jc.Decode(&prices) != nil { + return + } + + if jc.Check("failed to change prices", s.m.UpdatePrices(prices)) != nil { + return + } +} + +func (s *server) managerMaintenanceHandler(jc jape.Context) { + jc.Encode(struct { + Maintenance bool `json:"maintenance"` + }{Maintenance: s.m.Maintenance()}) +} + +func (s *server) managerSetMaintenanceHandler(jc jape.Context) { + var start struct { + Start bool `json:"start"` + } + if jc.Decode(&start) != nil { + return + } + + if jc.Check("couldn't set maintenance flag", s.m.StartMaintenance(start.Start)) != nil { + return + } +} diff --git a/node/api/server/portal.go b/node/api/server/portal.go new file mode 100644 index 0000000..0a2f3d3 --- /dev/null +++ b/node/api/server/portal.go @@ -0,0 +1,43 @@ +package server + +import ( + "github.com/mike76-dev/sia-satellite/modules" + "github.com/mike76-dev/sia-satellite/node/api" + "go.sia.tech/jape" +) + +func 
(s *server) portalCreditsHandler(jc jape.Context) { + jc.Encode(s.p.GetCredits()) +} + +func (s *server) portalSetCreditsHandler(jc jape.Context) { + var params modules.CreditData + if jc.Decode(¶ms) != nil { + return + } + + s.p.SetCredits(params) +} + +func (s *server) portalAnnouncementHandler(jc jape.Context) { + text, expires, err := s.p.GetAnnouncement() + if jc.Check("failed to read announcement", err) != nil { + return + } + + jc.Encode(api.Announcement{ + Text: text, + Expires: expires, + }) +} + +func (s *server) portalSetAnnouncementHandler(jc jape.Context) { + var params api.Announcement + if jc.Decode(¶ms) != nil { + return + } + + if jc.Check("failed to set announcement", s.p.SetAnnouncement(params.Text, params.Expires)) != nil { + return + } +} diff --git a/node/api/server/server.go b/node/api/server/server.go index 3c5c8ba..d987f83 100644 --- a/node/api/server/server.go +++ b/node/api/server/server.go @@ -1,246 +1,99 @@ package server import ( - "context" - "encoding/binary" - "errors" - "fmt" "net" "net/http" - "os" "strings" - "sync" - "syscall" - "time" + "github.com/mike76-dev/sia-satellite/internal/build" "github.com/mike76-dev/sia-satellite/modules" "github.com/mike76-dev/sia-satellite/node" "github.com/mike76-dev/sia-satellite/node/api" - "github.com/mike76-dev/sia-satellite/persist" - - "go.sia.tech/core/types" - - "golang.org/x/crypto/blake2b" - - "lukechampine.com/frand" + "go.sia.tech/coreutils/chain" + "go.sia.tech/jape" ) -// A Server is a collection of modules that can be communicated with over an http API. -type Server struct { - api *api.API - apiServer *http.Server - listener net.Listener - - node *node.Node - requiredUserAgent string - - serveChan chan struct{} - serveErr error - - closeChan chan struct{} - - closeMu sync.Mutex -} - -// serve listens for and handles API calls. It is a blocking function. 
-func (srv *Server) serve() error { - // The server will run until an error is encountered or the listener is - // closed, via either the Close method or by signal handling. Closing the - // listener will result in the benign error handled below. - err := srv.apiServer.Serve(srv.listener) - if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") { - return err - } - return nil +type server struct { + cm *chain.Manager + s modules.Syncer + m modules.Manager + p modules.Portal + w modules.Wallet } -// Close closes the Server's listener, causing the HTTP server to shut down. -func (srv *Server) Close() error { - defer close(srv.closeChan) - srv.closeMu.Lock() - defer srv.closeMu.Unlock() - // Stop accepting API requests. - err := srv.apiServer.Shutdown(context.Background()) - // Wait for serve() to return and capture its error. - <-srv.serveChan - if !modules.ContainsError(srv.serveErr, http.ErrServerClosed) { - err = modules.ComposeErrors(err, srv.serveErr) - } - // Shutdown modules. - if srv.node != nil { - err = modules.ComposeErrors(err, srv.node.Close()) +// newServer returns an HTTP handler that serves the hsd API. 
+func newServer(cm *chain.Manager, s modules.Syncer, m modules.Manager, p modules.Portal, w modules.Wallet) http.Handler { + srv := server{ + cm: cm, + s: s, + m: m, + p: p, + w: w, } - return modules.AddContext(err, "error while closing server") + return jape.Mux(map[string]jape.Handler{ + "GET /daemon/version": srv.versionHandler, + + "GET /consensus/network": srv.consensusNetworkHandler, + "GET /consensus/tip": srv.consensusTipHandler, + "GET /consensus/tipstate": srv.consensusTipStateHandler, + + "GET /syncer/peers": srv.syncerPeersHandler, + "POST /syncer/connect": srv.syncerConnectHandler, + "POST /syncer/broadcast/block": srv.syncerBroadcastBlockHandler, + + "GET /txpool/transactions": srv.txpoolTransactionsHandler, + "GET /txpool/fee": srv.txpoolFeeHandler, + "POST /txpool/broadcast": srv.txpoolBroadcastHandler, + + "GET /wallet/address": srv.walletAddressHandler, + "GET /wallet/addresses": srv.walletAddressesHandler, + "GET /wallet/balance": srv.walletBalanceHandler, + "GET /wallet/txpool": srv.walletTxpoolHandler, + "GET /wallet/outputs": srv.walletOutputsHandler, + "GET /wallet/watch": srv.walletWatchHandler, + "PUT /wallet/watch/:addr": srv.walletAddWatchHandler, + "DELETE /wallet/watch/:addr": srv.walletRemoveWatchHandler, + "POST /wallet/send": srv.walletSendHandler, + + "GET /manager/averages/:currency": srv.managerAveragesHandler, + "GET /manager/renters": srv.managerRentersHandler, + "GET /manager/renter/:publickey": srv.managerRenterHandler, + "GET /manager/balance/:publickey": srv.managerBalanceHandler, + "GET /manager/contracts/:publickey": srv.managerContractsHandler, + "GET /manager/preferences": srv.managerPreferencesHandler, + "POST /manager/preferences": srv.managerUpdatePreferencesHandler, + "GET /manager/prices": srv.managerPricesHandler, + "POST /manager/prices": srv.managerUpdatePricesHandler, + "GET /manager/maintenance": srv.managerMaintenanceHandler, + "POST /manager/maintenance": srv.managerSetMaintenanceHandler, + + "GET /hostdb": 
srv.hostdbHandler, + "GET /hostdb/active": srv.hostdbActiveHandler, + "GET /hostdb/all": srv.hostdbAllHandler, + "GET /hostdb/host/:publickey": srv.hostdbHostHandler, + "GET /hostdb/filtermode": srv.hostdbFilterModeHandler, + "POST /hostdb/filtermode": srv.hostdbSetFilterModeHandler, + + "GET /portal/credits": srv.portalCreditsHandler, + "POST /portal/credits": srv.portalSetCreditsHandler, + "GET /portal/announcement": srv.portalAnnouncementHandler, + "POST /portal/announcement": srv.portalSetAnnouncementHandler, + }) } -// WaitClose blocks until the server is done shutting down. -func (srv *Server) WaitClose() { - <-srv.closeChan -} - -// APIAddress returns the underlying node's api address. -func (srv *Server) APIAddress() string { - return srv.listener.Addr().String() -} - -// GatewayAddress returns the underlying node's gateway address. -func (srv *Server) GatewayAddress() modules.NetAddress { - return srv.node.Gateway.Address() -} - -// ServeErr is a blocking call that will return the result of srv.serve after -// the server stopped. -func (srv *Server) ServeErr() <-chan error { - c := make(chan error) - go func() { - <-srv.serveChan - close(c) - }() - return c -} - -// Unlock unlocks the wallet using the provided password. 
-func (srv *Server) Unlock(password string) error { - if srv.node.Wallet == nil { - return errors.New("server doesn't have a wallet") - } - var validKeys []modules.WalletKey - key, err := modules.KeyFromPhrase(password) - if err == nil { - h := blake2b.Sum256(key[:]) - wk := make([]byte, len(h)) - copy(wk, h[:]) - validKeys = append(validKeys, modules.WalletKey(wk)) - frand.Read(h[:]) - } - h := blake2b.Sum256([]byte(password)) - buf := make([]byte, 32+8) - copy(buf[:32], h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - h = blake2b.Sum256(buf) - key = types.NewPrivateKeyFromSeed(h[:]) - h = blake2b.Sum256(key[:]) - wk := make([]byte, len(h)) - copy(wk, h[:]) - validKeys = append(validKeys, modules.WalletKey(wk)) - frand.Read(h[:]) - for _, key := range validKeys { - if err := srv.node.Wallet.Unlock(key); err == nil { - return nil +func StartWeb(l net.Listener, node *node.Node, password string) error { + server := newServer(node.ChainManager, node.Syncer, node.Manager, node.Portal, node.Wallet) + api := jape.BasicAuth(password)(server) + return http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/api") { + r.URL.Path = strings.TrimPrefix(r.URL.Path, "/api") + api.ServeHTTP(w, r) + return } - } - return modules.ErrBadEncryptionKey + })) } -// NewAsync creates a new API server. The API will require authentication using -// HTTP basic auth if the supplied password is not the empty string. Usernames -// are ignored for authentication. This type of authentication sends passwords -// in plaintext and should therefore only be used if the apiAddr is localhost. -func NewAsync(config *persist.SatdConfig, apiPassword string, dbPassword string, loadStartTime time.Time) (*Server, <-chan error) { - c := make(chan error, 1) - defer close(c) - - var errChan <-chan error - var n *node.Node - s, err := func() (*Server, error) { - // Create the server listener. 
- listener, err := net.Listen("tcp", config.APIAddr) - if err != nil { - return nil, err - } - - // Create the api for the server. - api := api.New(config.UserAgent, apiPassword, nil, nil, nil, nil, nil, nil, nil) - srv := &Server{ - api: api, - apiServer: &http.Server{ - Handler: api, - - // ReadTimeout defines the maximum amount of time allowed to fully read - // the request body. This timeout is applied to every handler in the - // server. - ReadTimeout: time.Minute * 360, - - // ReadHeaderTimeout defines the amount of time allowed to fully read the - // request headers. - ReadHeaderTimeout: time.Minute * 2, - - // IdleTimeout defines the maximum duration a HTTP Keep-Alive connection - // the API is kept open with no activity before closing. - IdleTimeout: time.Minute * 5, - }, - closeChan: make(chan struct{}), - serveChan: make(chan struct{}), - listener: listener, - requiredUserAgent: config.UserAgent, - } - - // Set the shutdown method to allow the api to shutdown the server. - api.Shutdown = srv.Close - - // Spin up a goroutine that serves the API and closes srv.done when - // finished. - go func() { - srv.serveErr = srv.serve() - close(srv.serveChan) - }() - - // Create the node for the server after the server was started. - n, errChan = node.New(config, dbPassword, loadStartTime) - if err := modules.PeekErr(errChan); err != nil { - if isAddrInUseErr(err) { - return nil, fmt.Errorf("%v; are you running another instance of satd?", err.Error()) - } - return nil, fmt.Errorf("server is unable to create the satellite node: %s", err) - } - - // Make sure that the server wasn't shut down while loading the modules. - srv.closeMu.Lock() - defer srv.closeMu.Unlock() - select { - case <-srv.serveChan: - // Server was shut down. Close node and exit. - return srv, n.Close() - default: - } - - // Server wasn't shut down. Replace modules. 
- srv.node = n - api.SetModules(n.Gateway, n.ConsensusSet, n.Manager, n.Portal, n.Provider, n.TransactionPool, n.Wallet) - return srv, nil - }() - if err != nil { - if n != nil { - err = modules.ComposeErrors(err, n.Close()) - } - c <- err - return nil, c - } - return s, errChan -} - -// New creates a new API server. The API will require authentication using -// HTTP basic auth if the supplied password is not the empty string. -// Usernames are ignored for authentication. This type of authentication -// sends passwords in plaintext and should therefore only be used if the -// apiAddr is localhost. -func New(config *persist.SatdConfig, apiPassword string, dbPassword string, loadStartTime time.Time) (*Server, error) { - // Wait for the node to be done loading. - srv, errChan := NewAsync(config, apiPassword, dbPassword, loadStartTime) - if err := <-errChan; err != nil { - // Error occurred during async load. Close all modules. - fmt.Println("ERROR:", err) - return nil, err - } - return srv, nil -} - -// isAddrInUseErr checks if the error corresponds to syscall.EADDRINUSE. -func isAddrInUseErr(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if syscallErr, ok := opErr.Err.(*os.SyscallError); ok { - return syscallErr.Err == syscall.EADDRINUSE - } - } - return false +// versionHandler handles the API call that requests the daemon's version. 
+func (s *server) versionHandler(jc jape.Context) { + jc.Encode(api.DaemonVersion{Version: build.NodeVersion, GitRevision: build.GitRevision, BuildTime: build.BuildTime}) } diff --git a/node/api/server/wallet.go b/node/api/server/wallet.go new file mode 100644 index 0000000..6ee4506 --- /dev/null +++ b/node/api/server/wallet.go @@ -0,0 +1,86 @@ +package server + +import ( + "github.com/mike76-dev/sia-satellite/node/api" + "go.sia.tech/core/types" + "go.sia.tech/jape" +) + +func (s *server) walletAddressHandler(jc jape.Context) { + uc, err := s.w.NextAddress() + if jc.Check("unable to generate address", err) != nil { + return + } + jc.Encode(uc.UnlockHash()) +} + +func (s *server) walletAddressesHandler(jc jape.Context) { + addrs := s.w.Addresses() + jc.Encode(addrs) +} + +func (s *server) walletBalanceHandler(jc jape.Context) { + sc, isc, sf := s.w.ConfirmedBalance() + outgoing, incoming := s.w.UnconfirmedBalance() + height := s.w.Tip().Height + fee := s.cm.RecommendedFee() + resp := api.WalletBalanceResponse{ + Height: height, + Siacoins: sc, + ImmatureSiacoins: isc, + IncomingSiacoins: incoming, + OutgoingSiacoins: outgoing, + Siafunds: sf, + RecommendedFee: fee, + } + jc.Encode(resp) +} + +func (s *server) walletTxpoolHandler(jc jape.Context) { + pool := s.w.Annotate(s.cm.PoolTransactions()) + jc.Encode(pool) +} + +func (s *server) walletOutputsHandler(jc jape.Context) { + scos := s.w.UnspentSiacoinOutputs() + sfos := s.w.UnspentSiafundOutputs() + jc.Encode(api.WalletOutputsResponse{ + SiacoinOutputs: scos, + SiafundOutputs: sfos, + }) +} + +func (s *server) walletAddWatchHandler(jc jape.Context) { + var addr types.Address + if jc.DecodeParam("addr", &addr) != nil { + return + } else if jc.Check("couldn't add address", s.w.AddWatch(addr)) != nil { + return + } +} + +func (s *server) walletRemoveWatchHandler(jc jape.Context) { + var addr types.Address + if jc.DecodeParam("addr", &addr) != nil { + return + } else if jc.Check("couldn't remove address", 
s.w.RemoveWatch(addr)) != nil { + return + } +} + +func (s *server) walletWatchHandler(jc jape.Context) { + addrs := s.w.WatchedAddresses() + jc.Encode(addrs) +} + +func (s *server) walletSendHandler(jc jape.Context) { + var wsr api.WalletSendRequest + if jc.Decode(&wsr) != nil { + return + } + + _, err := s.w.SendSiacoins(wsr.Amount, wsr.Destination) + if jc.Check("couldn't send Siacoins", err) != nil { + return + } +} diff --git a/node/api/transactionpool.go b/node/api/transactionpool.go deleted file mode 100644 index 012277f..0000000 --- a/node/api/transactionpool.go +++ /dev/null @@ -1,191 +0,0 @@ -package api - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "io" - "net/http" - "strings" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" -) - -type ( - // TpoolFeeGET contains the current estimated fee. - TpoolFeeGET struct { - Minimum types.Currency `json:"minimum"` - Maximum types.Currency `json:"maximum"` - } - - // TpoolRawGET contains the requested transaction encoded to the raw - // format, along with the id of that transaction. - TpoolRawGET struct { - ID types.TransactionID `json:"id"` - Parents []byte `json:"parents"` - Transaction []byte `json:"transaction"` - } - - // TpoolConfirmedGET contains information about whether or not - // the transaction has been seen on the blockhain. - TpoolConfirmedGET struct { - Confirmed bool `json:"confirmed"` - } - - // TpoolTxnsGET contains the information about the tpool's transactions. - TpoolTxnsGET struct { - Transactions []types.Transaction `json:"transactions"` - } -) - -// RegisterRoutesTransactionPool is a helper function to register all -// transaction pool routes. 
-func RegisterRoutesTransactionPool(router *httprouter.Router, tpool modules.TransactionPool) { - router.GET("/tpool/fee", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - tpoolFeeHandlerGET(tpool, w, req, ps) - }) - router.GET("/tpool/raw/:id", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - tpoolRawHandlerGET(tpool, w, req, ps) - }) - router.POST("/tpool/raw", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - tpoolRawHandlerPOST(tpool, w, req, ps) - }) - router.GET("/tpool/confirmed/:id", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - tpoolConfirmedGET(tpool, w, req, ps) - }) - router.GET("/tpool/transactions", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - tpoolTransactionsHandler(tpool, w, req, ps) - }) -} - -// decodeTransactionID will decode a transaction id from a string. -func decodeTransactionID(txidStr string) (txid types.TransactionID, err error) { - err = txid.UnmarshalText([]byte(txidStr)) - return -} - -// tpoolFeeHandlerGET returns the current estimated fee. Transactions with -// fees lower than the estimated fee may take longer to confirm. -func tpoolFeeHandlerGET(tpool modules.TransactionPool, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - min, max := tpool.FeeEstimation() - WriteJSON(w, TpoolFeeGET{ - Minimum: min, - Maximum: max, - }) -} - -// tpoolRawHandlerGET will provide the raw byte representation of a -// transaction that matches the input id. 
-func tpoolRawHandlerGET(tpool modules.TransactionPool, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - txid, err := decodeTransactionID(ps.ByName("id")) - if err != nil { - WriteError(w, Error{"error decoding transaction id: " + err.Error()}, http.StatusBadRequest) - return - } - txn, parents, exists := tpool.Transaction(txid) - if !exists { - WriteError(w, Error{"transaction not found in transaction pool"}, http.StatusBadRequest) - return - } - - var p, t bytes.Buffer - e := types.NewEncoder(&p) - e.WritePrefix(len(parents)) - for _, parent := range parents { - parent.EncodeTo(e) - } - e.Flush() - e = types.NewEncoder(&t) - txn.EncodeTo(e) - e.Flush() - WriteJSON(w, TpoolRawGET{ - ID: txid, - Parents: p.Bytes(), - Transaction: t.Bytes(), - }) -} - -// tpoolRawHandlerPOST takes a raw encoded transaction set and posts -// it to the transaction pool, relaying it to the transaction pool's peers -// regardless of if the set is accepted. -func tpoolRawHandlerPOST(tpool modules.TransactionPool, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var parents []types.Transaction - var txn types.Transaction - - // JSON, base64, and raw binary are accepted. 
- if err := json.Unmarshal([]byte(req.FormValue("parents")), &parents); err != nil { - rawParents, err := base64.StdEncoding.DecodeString(req.FormValue("parents")) - if err != nil { - rawParents = []byte(req.FormValue("parents")) - } - pBuf := bytes.NewBuffer(rawParents) - d := types.NewDecoder(io.LimitedReader{R: pBuf, N: int64(len(rawParents))}) - l := d.ReadPrefix() - if err := d.Err(); err != nil { - WriteError(w, Error{"error decoding parents: " + err.Error()}, http.StatusBadRequest) - return - } - parents = make([]types.Transaction, l) - for i := 0; i < l; i++ { - parents[i].DecodeFrom(d) - if err := d.Err(); err != nil { - WriteError(w, Error{"error decoding parents: " + err.Error()}, http.StatusBadRequest) - return - } - } - } - if err := json.Unmarshal([]byte(req.FormValue("transaction")), &txn); err != nil { - rawTransaction, err := base64.StdEncoding.DecodeString(req.FormValue("transaction")) - if err != nil { - rawTransaction = []byte(req.FormValue("transaction")) - } - tBuf := bytes.NewBuffer(rawTransaction) - d := types.NewDecoder(io.LimitedReader{R: tBuf, N: int64(len(rawTransaction))}) - txn.DecodeFrom(d) - if err := d.Err(); err != nil { - WriteError(w, Error{"error decoding transaction: " + err.Error()}, http.StatusBadRequest) - return - } - } - - // Broadcast the transaction set, so that they are passed to any peers that - // may have rejected them earlier. - txnSet := append(parents, txn) - tpool.Broadcast(txnSet) - err := tpool.AcceptTransactionSet(txnSet) - if err != nil && !strings.Contains(err.Error(), modules.ErrDuplicateTransactionSet.Error()) { - WriteError(w, Error{"error accepting transaction set: " + err.Error()}, http.StatusBadRequest) - return - } - WriteSuccess(w) -} - -// tpoolConfirmedGET returns whether the specified transaction has -// been seen on the blockchain. 
-func tpoolConfirmedGET(tpool modules.TransactionPool, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - txid, err := decodeTransactionID(ps.ByName("id")) - if err != nil { - WriteError(w, Error{"error decoding transaction id: " + err.Error()}, http.StatusBadRequest) - return - } - confirmed, err := tpool.TransactionConfirmed(txid) - if err != nil { - WriteError(w, Error{"error fetching transaction status: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, TpoolConfirmedGET{ - Confirmed: confirmed, - }) -} - -// tpoolTransactionsHandler returns the current transactions of the transaction -// pool -func tpoolTransactionsHandler(tpool modules.TransactionPool, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - txns := tpool.Transactions() - WriteJSON(w, TpoolTxnsGET{ - Transactions: txns, - }) -} diff --git a/node/api/wallet.go b/node/api/wallet.go deleted file mode 100644 index f15300f..0000000 --- a/node/api/wallet.go +++ /dev/null @@ -1,851 +0,0 @@ -package api - -import ( - "encoding/binary" - "encoding/json" - "fmt" - "math" - "net/http" - "strconv" - - "github.com/julienschmidt/httprouter" - "github.com/mike76-dev/sia-satellite/modules" - - "go.sia.tech/core/types" - - "golang.org/x/crypto/blake2b" - - "lukechampine.com/frand" -) - -type ( - // WalletGET contains general information about the wallet. - WalletGET struct { - Encrypted bool `json:"encrypted"` - Height uint64 `json:"height"` - Rescanning bool `json:"rescanning"` - Unlocked bool `json:"unlocked"` - - ConfirmedSiacoinBalance types.Currency `json:"confirmedsiacoinbalance"` - UnconfirmedOutgoingSiacoins types.Currency `json:"unconfirmedoutgoingsiacoins"` - UnconfirmedIncomingSiacoins types.Currency `json:"unconfirmedincomingsiacoins"` - - DustThreshold types.Currency `json:"dustthreshold"` - } - - // WalletAddressGET contains an address returned by a GET call to - // /wallet/address. 
- WalletAddressGET struct { - Address types.Address `json:"address"` - } - - // WalletAddressesGET contains the list of wallet addresses returned by a - // GET call to /wallet/addresses. - WalletAddressesGET struct { - Addresses []types.Address `json:"addresses"` - } - - // WalletInitPOST contains the primary seed that gets generated during a - // POST call to /wallet/init. - WalletInitPOST struct { - PrimarySeed string `json:"primaryseed"` - } - - // WalletSiacoinsPOST contains the transaction sent in the POST call to - // /wallet/siacoins. - WalletSiacoinsPOST struct { - Transactions []types.Transaction `json:"transactions"` - TransactionIDs []types.TransactionID `json:"transactionids"` - } - - // WalletSignPOSTParams contains the unsigned transaction and a set of - // inputs to sign. - WalletSignPOSTParams struct { - Transaction types.Transaction `json:"transaction"` - ToSign []types.Hash256 `json:"tosign"` - } - - // WalletSignPOSTResp contains the signed transaction. - WalletSignPOSTResp struct { - Transaction types.Transaction `json:"transaction"` - } - - // WalletSeedsGET contains the seeds used by the wallet. - WalletSeedsGET struct { - PrimarySeed string `json:"primaryseed"` - AddressesRemaining int `json:"addressesremaining"` - AllSeeds []string `json:"allseeds"` - } - - // WalletSweepPOST contains the coins returned by a call to - // /wallet/sweep. - WalletSweepPOST struct { - Coins types.Currency `json:"coins"` - } - - // WalletTransactionGETid contains the transaction returned by a call to - // /wallet/transaction/:id - WalletTransactionGETid struct { - Transaction modules.ProcessedTransaction `json:"transaction"` - } - - // WalletTransactionsGET contains the specified set of confirmed and - // unconfirmed transactions. 
- WalletTransactionsGET struct { - ConfirmedTransactions []modules.ProcessedTransaction `json:"confirmedtransactions"` - UnconfirmedTransactions []modules.ProcessedTransaction `json:"unconfirmedtransactions"` - } - - // WalletTransactionsGETaddr contains the set of wallet transactions - // relevant to the input address provided in the call to - // /wallet/transaction/:addr. - WalletTransactionsGETaddr struct { - ConfirmedTransactions []modules.ProcessedTransaction `json:"confirmedtransactions"` - UnconfirmedTransactions []modules.ProcessedTransaction `json:"unconfirmedtransactions"` - } - - // WalletUnlockConditionsGET contains a set of unlock conditions. - WalletUnlockConditionsGET struct { - UnlockConditions types.UnlockConditions `json:"unlockconditions"` - } - - // WalletUnlockConditionsPOSTParams contains a set of unlock conditions. - WalletUnlockConditionsPOSTParams struct { - UnlockConditions types.UnlockConditions `json:"unlockconditions"` - } - - // WalletUnspentGET contains the unspent outputs tracked by the wallet. - // The MaturityHeight field of each output indicates the height of the - // block that the output appeared in. - WalletUnspentGET struct { - Outputs []modules.UnspentOutput `json:"outputs"` - } - - // WalletVerifyAddressGET contains a bool indicating if the address passed to - // /wallet/verify/address/:addr is a valid address. - WalletVerifyAddressGET struct { - Valid bool `json:"valid"` - } - - // WalletVerifyPasswordGET contains a bool indicating if the password passed - // to /wallet/verifypassword is the password being used to encrypt the - // wallet. - WalletVerifyPasswordGET struct { - Valid bool `json:"valid"` - } - - // WalletWatchPOST contains the set of addresses to add or remove from the - // watch set. 
- WalletWatchPOST struct { - Addresses []types.Address `json:"addresses"` - Remove bool `json:"remove"` - Unused bool `json:"unused"` - } - - // WalletWatchGET contains the set of addresses that the wallet is - // currently watching. - WalletWatchGET struct { - Addresses []types.Address `json:"addresses"` - } -) - -// RegisterRoutesWallet is a helper function to register all wallet routes. -func RegisterRoutesWallet(router *httprouter.Router, wallet modules.Wallet, requiredPassword string) { - router.GET("/wallet", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletHandler(wallet, w, req, ps) - }) - router.GET("/wallet/address", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletAddressHandler(wallet, w, req, ps) - }, requiredPassword)) - router.GET("/wallet/addresses", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletAddressesHandler(wallet, w, req, ps) - }) - router.GET("/wallet/seedaddrs", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletSeedAddressesHandler(wallet, w, req, ps) - }) - router.POST("/wallet/init", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletInitHandler(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/init/seed", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletInitSeedHandler(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/lock", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletLockHandler(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/seed", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletSeedHandler(wallet, w, req, ps) - }, requiredPassword)) - router.GET("/wallet/seeds", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - 
walletSeedsHandler(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/siacoins", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletSiacoinsHandler(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/sweep/seed", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletSweepSeedHandler(wallet, w, req, ps) - }, requiredPassword)) - router.GET("/wallet/transaction/:id", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletTransactionHandler(wallet, w, req, ps) - }) - router.GET("/wallet/transactions", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletTransactionsHandler(wallet, w, req, ps) - }) - router.GET("/wallet/transactions/:addr", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletTransactionsAddrHandler(wallet, w, req, ps) - }) - router.GET("/wallet/verify/address/:addr", func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletVerifyAddressHandler(w, req, ps) - }) - router.POST("/wallet/unlock", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletUnlockHandler(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/changepassword", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletChangePasswordHandler(wallet, w, req, ps) - }, requiredPassword)) - router.GET("/wallet/verifypassword", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletVerifyPasswordHandler(wallet, w, req, ps) - }, requiredPassword)) - router.GET("/wallet/unlockconditions/:addr", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletUnlockConditionsHandlerGET(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/unlockconditions", RequirePassword(func(w http.ResponseWriter, req 
*http.Request, ps httprouter.Params) { - walletUnlockConditionsHandlerPOST(wallet, w, req, ps) - }, requiredPassword)) - router.GET("/wallet/unspent", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletUnspentHandler(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/sign", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletSignHandler(wallet, w, req, ps) - }, requiredPassword)) - router.GET("/wallet/watch", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletWatchHandlerGET(wallet, w, req, ps) - }, requiredPassword)) - router.POST("/wallet/watch", RequirePassword(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - walletWatchHandlerPOST(wallet, w, req, ps) - }, requiredPassword)) -} - -// encryptionKeys enumerates the possible encryption keys that can be derived -// from an input string. -func encryptionKeys(seedStr string) (validKeys []modules.WalletKey, seed modules.Seed) { - var err error - seed, err = modules.DecodeBIP39Phrase(seedStr) - if err == nil { - h := blake2b.Sum256(seed[:]) - buf := make([]byte, 32 + 8) - copy(buf[:32], h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - h = blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(h[:]) - h = blake2b.Sum256(key[:]) - wk := make([]byte, len(h)) - copy(wk, h[:]) - validKeys = append(validKeys, modules.WalletKey(wk)) - frand.Read(h[:]) - } - h := blake2b.Sum256([]byte(seedStr)) - buf := make([]byte, 32 + 8) - copy(buf[:32], h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - h = blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(h[:]) - h = blake2b.Sum256(key[:]) - wk := make([]byte, len(h)) - copy(wk, h[:]) - validKeys = append(validKeys, modules.WalletKey(wk)) - frand.Read(h[:]) - return -} - -// walletHander handles API calls to /wallet. 
-func walletHandler(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - siacoinBal, _, _, err := wallet.ConfirmedBalance() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) - return - } - siacoinsOut, siacoinsIn, err := wallet.UnconfirmedBalance() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) - return - } - dustThreshold, err := wallet.DustThreshold() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) - return - } - encrypted, err := wallet.Encrypted() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) - return - } - unlocked, err := wallet.Unlocked() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) - return - } - rescanning, err := wallet.Rescanning() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) - return - } - height, err := wallet.Height() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletGET{ - Encrypted: encrypted, - Unlocked: unlocked, - Rescanning: rescanning, - Height: height, - - ConfirmedSiacoinBalance: siacoinBal, - UnconfirmedOutgoingSiacoins: siacoinsOut, - UnconfirmedIncomingSiacoins: siacoinsIn, - - DustThreshold: dustThreshold, - }) -} - -// walletAddressHandler handles API calls to /wallet/address. 
-func walletAddressHandler(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - unlockConditions, err := wallet.NextAddress() - if err != nil { - WriteError(w, Error{"error when calling /wallet/addresses: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletAddressGET{ - Address: unlockConditions.UnlockHash(), - }) -} - -// walletSeedAddressesHandler handles the requests to /wallet/seedaddrs. -func walletSeedAddressesHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Parse the count argument. If it isn't specified we return as many - // addresses as possible. - count := uint64(math.MaxUint64) - c := req.FormValue("count") - if c != "" { - _, err := fmt.Sscan(c, &count) - if err != nil { - WriteError(w, Error{"Failed to parse count: " + err.Error()}, http.StatusBadRequest) - return - } - } - // Get the last count addresses. - addresses, err := wallet.LastAddresses(count) - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet/addresses: %v", err)}, http.StatusBadRequest) - return - } - // Send the response. - WriteJSON(w, WalletAddressesGET{ - Addresses: addresses, - }) -} - -// walletAddressHandler handles API calls to /wallet/addresses. -func walletAddressesHandler(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - addresses, err := wallet.AllAddresses() - if err != nil { - WriteError(w, Error{fmt.Sprintf("Error when calling /wallet/addresses: %v", err)}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletAddressesGET{ - Addresses: addresses, - }) -} - -// walletInitHandler handles API calls to /wallet/init. 
-func walletInitHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var encryptionKey modules.WalletKey - if req.FormValue("encryptionpassword") != "" { - h := blake2b.Sum256([]byte(req.FormValue("encryptionpassword"))) - buf := make([]byte, 32 + 8) - copy(buf[:32], h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - h = blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(h[:]) - h = blake2b.Sum256(key[:]) - wk := make([]byte, len(h)) - copy(wk, h[:]) - encryptionKey = modules.WalletKey(wk) - frand.Read(h[:]) - } - - if req.FormValue("force") == "true" { - err := wallet.Reset() - if err != nil { - WriteError(w, Error{"error when calling /wallet/init: " + err.Error()}, http.StatusBadRequest) - return - } - } - seed, err := wallet.Encrypt(encryptionKey) - if err != nil { - WriteError(w, Error{"error when calling /wallet/init: " + err.Error()}, http.StatusBadRequest) - return - } - - seedStr := modules.EncodeBIP39Phrase(seed) - WriteJSON(w, WalletInitPOST{ - PrimarySeed: seedStr, - }) -} - -// walletInitSeedHandler handles API calls to /wallet/init/seed. 
-func walletInitSeedHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var encryptionKey modules.WalletKey - if req.FormValue("encryptionpassword") != "" { - h := blake2b.Sum256([]byte(req.FormValue("encryptionpassword"))) - buf := make([]byte, 32 + 8) - copy(buf[:32], h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - h = blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(h[:]) - h = blake2b.Sum256(key[:]) - wk := make([]byte, len(h)) - copy(wk, h[:]) - encryptionKey = modules.WalletKey(wk) - frand.Read(h[:]) - } - seed, err := modules.DecodeBIP39Phrase(req.FormValue("seed")) - if err != nil { - WriteError(w, Error{"error when calling /wallet/init/seed: " + err.Error()}, http.StatusBadRequest) - return - } - - if req.FormValue("force") == "true" { - err = wallet.Reset() - if err != nil { - WriteError(w, Error{"error when calling /wallet/init/seed: " + err.Error()}, http.StatusBadRequest) - return - } - } - - err = wallet.InitFromSeed(encryptionKey, seed) - if err != nil { - WriteError(w, Error{"error when calling /wallet/init/seed: " + err.Error()}, http.StatusBadRequest) - return - } - WriteSuccess(w) -} - -// walletSeedHandler handles API calls to /wallet/seed. -func walletSeedHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Get the seed using the dictionary. 
- seed, err := modules.DecodeBIP39Phrase(req.FormValue("seed")) - if err != nil { - WriteError(w, Error{"error when calling /wallet/seed: " + err.Error()}, http.StatusBadRequest) - return - } - - potentialKeys, _ := encryptionKeys(req.FormValue("encryptionpassword")) - for _, key := range potentialKeys { - err := wallet.LoadSeed(key, seed) - if err == nil { - WriteSuccess(w) - return - } - if !modules.ContainsError(err, modules.ErrBadEncryptionKey) { - WriteError(w, Error{"error when calling /wallet/seed: " + err.Error()}, http.StatusBadRequest) - return - } - } - WriteError(w, Error{"error when calling /wallet/seed: " + modules.ErrBadEncryptionKey.Error()}, http.StatusBadRequest) -} - -// walletLockHandler handles API calls to /wallet/lock. -func walletLockHandler(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - err := wallet.Lock() - if err != nil { - WriteError(w, Error{err.Error()}, http.StatusBadRequest) - return - } - WriteSuccess(w) -} - -// walletSeedsHandler handles API calls to /wallet/seeds. -func walletSeedsHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Get the primary seed information. - primarySeed, addrsRemaining, err := wallet.PrimarySeed() - if err != nil { - WriteError(w, Error{"error when calling /wallet/seeds: " + err.Error()}, http.StatusBadRequest) - return - } - primarySeedStr := modules.EncodeBIP39Phrase(primarySeed) - - // Get the list of seeds known to the wallet. 
- allSeeds, err := wallet.AllSeeds() - if err != nil { - WriteError(w, Error{"error when calling /wallet/seeds: " + err.Error()}, http.StatusBadRequest) - return - } - var allSeedsStrs []string - for _, seed := range allSeeds { - str := modules.EncodeBIP39Phrase(seed) - allSeedsStrs = append(allSeedsStrs, str) - } - WriteJSON(w, WalletSeedsGET{ - PrimarySeed: primarySeedStr, - AddressesRemaining: int(addrsRemaining), - AllSeeds: allSeedsStrs, - }) -} - -// walletSiacoinsHandler handles API calls to /wallet/siacoins. -func walletSiacoinsHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var txns []types.Transaction - if req.FormValue("outputs") != "" { - // Multiple amounts + destinations. - if req.FormValue("amount") != "" || req.FormValue("destination") != "" || req.FormValue("feeIncluded") != "" { - WriteError(w, Error{"cannot supply both 'outputs' and single amount+destination pair and/or feeIncluded parameter"}, http.StatusInternalServerError) - return - } - - var outputs []types.SiacoinOutput - err := json.Unmarshal([]byte(req.FormValue("outputs")), &outputs) - if err != nil { - WriteError(w, Error{"could not decode outputs: " + err.Error()}, http.StatusInternalServerError) - return - } - txns, err = wallet.SendSiacoinsMulti(outputs) - if err != nil { - WriteError(w, Error{"error when calling /wallet/siacoins: " + err.Error()}, http.StatusInternalServerError) - return - } - } else { - // Single amount + destination. 
- amount, ok := scanAmount(req.FormValue("amount")) - if !ok { - WriteError(w, Error{"could not read amount from POST call to /wallet/siacoins"}, http.StatusBadRequest) - return - } - dest, err := scanAddress(req.FormValue("destination")) - if err != nil { - WriteError(w, Error{"could not read address from POST call to /wallet/siacoins"}, http.StatusBadRequest) - return - } - feeIncluded, err := scanBool(req.FormValue("feeIncluded")) - if err != nil { - WriteError(w, Error{"could not read feeIncluded from POST call to /wallet/siacoins"}, http.StatusBadRequest) - return - } - - if feeIncluded { - txns, err = wallet.SendSiacoinsFeeIncluded(amount, dest) - } else { - txns, err = wallet.SendSiacoins(amount, dest) - } - if err != nil { - WriteError(w, Error{"error when calling /wallet/siacoins: " + err.Error()}, http.StatusInternalServerError) - return - } - } - - var txids []types.TransactionID - for _, txn := range txns { - txids = append(txids, txn.ID()) - } - WriteJSON(w, WalletSiacoinsPOST{ - Transactions: txns, - TransactionIDs: txids, - }) -} - -// walletSweepSeedHandler handles API calls to /wallet/sweep/seed. -func walletSweepSeedHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - // Get the seed using the dictionary + phrase. - seed, err := modules.DecodeBIP39Phrase(req.FormValue("seed")) - if err != nil { - WriteError(w, Error{"error when calling /wallet/sweep/seed: " + err.Error()}, http.StatusBadRequest) - return - } - - coins, _, err := wallet.SweepSeed(seed) - if err != nil { - WriteError(w, Error{"error when calling /wallet/sweep/seed: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletSweepPOST{ - Coins: coins, - }) -} - -// walletTransactionHandler handles API calls to /wallet/transaction/:id. -func walletTransactionHandler(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - // Parse the id from the url. 
- var id types.TransactionID - jsonID := "\"" + ps.ByName("id") + "\"" - err := json.Unmarshal([]byte(jsonID), &id) - if err != nil { - WriteError(w, Error{"error when calling /wallet/transaction/id: " + err.Error()}, http.StatusBadRequest) - return - } - - txn, ok, err := wallet.Transaction(id) - if err != nil { - WriteError(w, Error{"error when calling /wallet/transaction/id: " + err.Error()}, http.StatusBadRequest) - return - } - if !ok { - WriteError(w, Error{"error when calling /wallet/transaction/id : transaction not found"}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletTransactionGETid{ - Transaction: txn, - }) -} - -// walletTransactionsHandler handles API calls to /wallet/transactions. -func walletTransactionsHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - startheightStr, endheightStr := req.FormValue("startheight"), req.FormValue("endheight") - if startheightStr == "" || endheightStr == "" { - WriteError(w, Error{"startheight and endheight must be provided to a /wallet/transactions call."}, http.StatusBadRequest) - return - } - // Get the start and end blocks. - start, err := strconv.ParseUint(startheightStr, 10, 64) - if err != nil { - WriteError(w, Error{"parsing integer value for parameter `startheight` failed: " + err.Error()}, http.StatusBadRequest) - return - } - // Check if endheightStr is set to -1. If it is, we use MaxUint64 as the - // end. Otherwise we parse the argument as an unsigned integer. 
- var end uint64 - if endheightStr == "-1" { - end = math.MaxUint64 - } else { - end, err = strconv.ParseUint(endheightStr, 10, 64) - } - if err != nil { - WriteError(w, Error{"parsing integer value for parameter `endheight` failed: " + err.Error()}, http.StatusBadRequest) - return - } - confirmedTxns, err := wallet.Transactions(start, end) - if err != nil { - WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) - return - } - unconfirmedTxns, err := wallet.UnconfirmedTransactions() - if err != nil { - WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) - return - } - - WriteJSON(w, WalletTransactionsGET{ - ConfirmedTransactions: confirmedTxns, - UnconfirmedTransactions: unconfirmedTxns, - }) -} - -// walletTransactionsAddrHandler handles API calls to -// /wallet/transactions/:addr. -func walletTransactionsAddrHandler(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - // Parse the address being input. - jsonAddr := "\"" + ps.ByName("addr") + "\"" - var addr types.Address - err := json.Unmarshal([]byte(jsonAddr), &addr) - if err != nil { - WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) - return - } - - confirmedATs, err := wallet.AddressTransactions(addr) - if err != nil { - WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) - return - } - unconfirmedATs, err := wallet.AddressUnconfirmedTransactions(addr) - if err != nil { - WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletTransactionsGETaddr{ - ConfirmedTransactions: confirmedATs, - UnconfirmedTransactions: unconfirmedATs, - }) -} - -// walletUnlockHandler handles API calls to /wallet/unlock. 
-func walletUnlockHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - potentialKeys, _ := encryptionKeys(req.FormValue("encryptionpassword")) - var err error - for _, key := range potentialKeys { - errChan := wallet.UnlockAsync(key) - var unlockErr error - select { - case unlockErr = <-errChan: - default: - } - if unlockErr == nil { - WriteSuccess(w) - return - } - err = modules.ComposeErrors(err, unlockErr) - } - WriteError(w, Error{"error when calling /wallet/unlock: " + err.Error()}, http.StatusBadRequest) -} - -// walletChangePasswordHandler handles API calls to /wallet/changepassword -func walletChangePasswordHandler(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var newKey modules.WalletKey - newPassword := req.FormValue("newpassword") - if newPassword == "" { - WriteError(w, Error{"a password must be provided to newpassword"}, http.StatusBadRequest) - return - } - - h := blake2b.Sum256([]byte(newPassword)) - buf := make([]byte, 32 + 8) - copy(buf[:32], h[:]) - binary.LittleEndian.PutUint64(buf[32:], 0) - h = blake2b.Sum256(buf) - key := types.NewPrivateKeyFromSeed(h[:]) - h = blake2b.Sum256(key[:]) - frand.Read(key[:]) - wk := make([]byte, len(h)) - copy(wk, h[:]) - frand.Read(h[:]) - newKey = modules.WalletKey(wk) - - originalKeys, seed := encryptionKeys(req.FormValue("encryptionpassword")) - var err error - for _, key := range originalKeys { - keyErr := wallet.ChangeKey(key, newKey) - if keyErr == nil { - WriteSuccess(w) - return - } - err = modules.ComposeErrors(err, keyErr) - } - seedErr := wallet.ChangeKeyWithSeed(seed, newKey) - if seedErr == nil { - WriteSuccess(w) - return - } - err = modules.ComposeErrors(err, seedErr) - WriteError(w, Error{"error when calling /wallet/changepassword: " + err.Error()}, http.StatusBadRequest) - return -} - -// walletVerifyPasswordHandler handles API calls to /wallet/verifypassword -func walletVerifyPasswordHandler(wallet 
modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - originalKeys, _ := encryptionKeys(req.FormValue("password")) - var err error - for _, key := range originalKeys { - valid, keyErr := wallet.IsMasterKey(key) - if keyErr == nil { - WriteJSON(w, WalletVerifyPasswordGET{ - Valid: valid, - }) - return - } - err = modules.ComposeErrors(err, keyErr) - } - WriteError(w, Error{"error when calling /wallet/verifypassword: " + err.Error()}, http.StatusBadRequest) -} - -// walletVerifyAddressHandler handles API calls to /wallet/verify/address/:addr. -func walletVerifyAddressHandler(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - addrString := ps.ByName("addr") - - err := new(types.Address).UnmarshalText([]byte(addrString)) - WriteJSON(w, WalletVerifyAddressGET{Valid: err == nil}) -} - -// walletUnlockConditionsHandlerGET handles GET calls to /wallet/unlockconditions. -func walletUnlockConditionsHandlerGET(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, ps httprouter.Params) { - var addr types.Address - err := addr.UnmarshalText([]byte(ps.ByName("addr"))) - if err != nil { - WriteError(w, Error{"error when calling /wallet/unlockconditions: " + err.Error()}, http.StatusBadRequest) - return - } - uc, err := wallet.UnlockConditions(addr) - if err != nil { - WriteError(w, Error{"error when calling /wallet/unlockconditions: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletUnlockConditionsGET{ - UnlockConditions: uc, - }) -} - -// walletUnlockConditionsHandlerPOST handles POST calls to /wallet/unlockconditions. 
-func walletUnlockConditionsHandlerPOST(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var params WalletUnlockConditionsPOSTParams - err := json.NewDecoder(req.Body).Decode(&params) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - err = wallet.AddUnlockConditions(params.UnlockConditions) - if err != nil { - WriteError(w, Error{"error when calling /wallet/unlockconditions: " + err.Error()}, http.StatusBadRequest) - return - } - WriteSuccess(w) -} - -// walletUnspentHandler handles API calls to /wallet/unspent. -func walletUnspentHandler(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - outputs, err := wallet.UnspentOutputs() - if err != nil { - WriteError(w, Error{"error when calling /wallet/unspent: " + err.Error()}, http.StatusInternalServerError) - return - } - WriteJSON(w, WalletUnspentGET{ - Outputs: outputs, - }) -} - -// walletSignHandler handles API calls to /wallet/sign. -func walletSignHandler(wt modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var params WalletSignPOSTParams - err := json.NewDecoder(req.Body).Decode(&params) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - err = wt.SignTransaction(&params.Transaction, params.ToSign, modules.FullCoveredFields()) - if err != nil { - WriteError(w, Error{"failed to sign transaction: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletSignPOSTResp{ - Transaction: params.Transaction, - }) -} - -// walletWatchHandlerGET handles GET calls to /wallet/watch. 
-func walletWatchHandlerGET(wallet modules.Wallet, w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - addrs, err := wallet.WatchAddresses() - if err != nil { - WriteError(w, Error{"failed to get watch addresses: " + err.Error()}, http.StatusBadRequest) - return - } - WriteJSON(w, WalletWatchGET{ - Addresses: addrs, - }) -} - -// walletWatchHandlerPOST handles POST calls to /wallet/watch. -func walletWatchHandlerPOST(wallet modules.Wallet, w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - var wwpp WalletWatchPOST - err := json.NewDecoder(req.Body).Decode(&wwpp) - if err != nil { - WriteError(w, Error{"invalid parameters: " + err.Error()}, http.StatusBadRequest) - return - } - if wwpp.Remove { - err = wallet.RemoveWatchAddresses(wwpp.Addresses, wwpp.Unused) - } else { - err = wallet.AddWatchAddresses(wwpp.Addresses, wwpp.Unused) - } - if err != nil { - WriteError(w, Error{"failed to update watch set: " + err.Error()}, http.StatusBadRequest) - return - } - WriteSuccess(w) -} diff --git a/node/node.go b/node/node.go index b0a84b5..8bb152e 100644 --- a/node/node.go +++ b/node/node.go @@ -10,32 +10,32 @@ import ( "github.com/go-sql-driver/mysql" "github.com/mike76-dev/sia-satellite/mail" "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/modules/consensus" - "github.com/mike76-dev/sia-satellite/modules/gateway" "github.com/mike76-dev/sia-satellite/modules/manager" "github.com/mike76-dev/sia-satellite/modules/portal" "github.com/mike76-dev/sia-satellite/modules/provider" - "github.com/mike76-dev/sia-satellite/modules/transactionpool" + "github.com/mike76-dev/sia-satellite/modules/syncer" "github.com/mike76-dev/sia-satellite/modules/wallet" "github.com/mike76-dev/sia-satellite/persist" + "go.sia.tech/coreutils" + "go.sia.tech/coreutils/chain" ) // Node represents a satellite node containing all required modules. type Node struct { - // MySQL database. - DB *sql.DB + // Databases. 
+ db *sql.DB + bdb *coreutils.BoltChainDB // The modules of the node. - ConsensusSet modules.ConsensusSet - Gateway modules.Gateway - Manager modules.Manager - Portal modules.Portal - Provider modules.Provider - TransactionPool modules.TransactionPool - Wallet modules.Wallet - - // The directory where all the logs are stored. - Dir string + ChainManager *chain.Manager + Syncer modules.Syncer + Manager modules.Manager + Portal modules.Portal + Provider modules.Provider + Wallet modules.Wallet + + // The start function. + Start func() (stop func()) } // Close will call close on every module within the node, combining and @@ -57,43 +57,33 @@ func (n *Node) Close() (err error) { fmt.Println("Closing wallet...") err = modules.ComposeErrors(err, n.Wallet.Close()) } - if n.TransactionPool != nil { - fmt.Println("Closing transaction pool...") - err = modules.ComposeErrors(err, n.TransactionPool.Close()) + if n.Syncer != nil { + fmt.Println("Closing syncer...") + err = modules.ComposeErrors(err, n.Syncer.Close()) } - if n.ConsensusSet != nil { - fmt.Println("Closing consensus...") - err = modules.ComposeErrors(err, n.ConsensusSet.Close()) - } - if n.Gateway != nil { - fmt.Println("Closing gateway...") - err = modules.ComposeErrors(err, n.Gateway.Close()) - } - if n.DB != nil { - fmt.Println("Closing database...") - err = modules.ComposeErrors(err, n.DB.Close()) + if n.db != nil && n.bdb != nil { + fmt.Println("Closing databases...") + err = modules.ComposeErrors(err, n.db.Close(), n.bdb.Close()) } return err } // New will create a new node. -func New(config *persist.SatdConfig, dbPassword string, loadStartTime time.Time) (*Node, <-chan error) { +func New(config *persist.SatdConfig, dbPassword, seed string, loadStartTime time.Time) (*Node, error) { // Make sure the path is an absolute one. d, err := filepath.Abs(config.Dir) - errChan := make(chan error, 1) if err != nil { - errChan <- err - return nil, errChan + return nil, err } // Create a mail client. 
fmt.Println("Creating mail client...") - ms, err := mail.New(config.Dir) + ms, err := mail.New(d) if err != nil { log.Fatalf("ERROR: could not create mail client: %v\n", err) } - // Connect to the database. + // Connect to the MySQL database. fmt.Println("Connecting to the SQL database...") cfg := mysql.Config{ User: config.DBUser, @@ -105,7 +95,7 @@ func New(config *persist.SatdConfig, dbPassword string, loadStartTime time.Time) } db, err := sql.Open("mysql", cfg.FormatDSN()) if err != nil { - log.Fatalf("Could not connect to the database: %v\n", err) + log.Fatalf("Could not connect to MySQL database: %v\n", err) } err = db.Ping() if err != nil { @@ -115,79 +105,83 @@ func New(config *persist.SatdConfig, dbPassword string, loadStartTime time.Time) db.SetMaxOpenConns(10) db.SetMaxIdleConns(10) - // Load gateway. - fmt.Println("Loading gateway...") - g, err := gateway.New(db, config.GatewayAddr, config.Bootstrap, true, d) + // Connect to the BoltDB database. + fmt.Println("Connecting to the BoltDB database...") + bdb, err := coreutils.OpenBoltChainDB(filepath.Join(d, "consensus.db")) if err != nil { - errChan <- modules.AddContext(err, "unable to create gateway") - return nil, errChan + log.Fatalf("Could not connect to BoltDB database: %v\n", err) } - // Load consensus. - fmt.Println("Loading consensus...") - cs, errChanCS := consensus.New(db, g, config.Bootstrap, d) - if err := modules.PeekErr(errChanCS); err != nil { - errChan <- modules.AddContext(err, "unable to create consensus set") - return nil, errChan + // Create chain manager. + fmt.Println("Loading chain manager...") + network, genesisBlock := chain.Mainnet() + dbstore, tipState, err := chain.NewDBStore(bdb, network, genesisBlock) + if err != nil { + log.Fatalf("Unable to create chain manager store: %v\n", err) } + cm := chain.NewManager(dbstore, tipState) - // Load transaction pool. - fmt.Println("Loading transaction pool...") - tp, err := transactionpool.New(db, cs, g, d) + // Load syncer. 
+ fmt.Println("Loading syncer...") + s, err := syncer.New(cm, config.GatewayAddr, d) if err != nil { - errChan <- modules.AddContext(err, "unable to create transaction pool") - return nil, errChan + log.Fatalf("Unable to create syncer: %v\n", err) } // Load wallet. fmt.Println("Loading wallet...") - w, err := wallet.New(db, cs, tp, d) + w, err := wallet.New(db, cm, s, seed, d) if err != nil { - errChan <- modules.AddContext(err, "unable to create wallet") - return nil, errChan + return nil, modules.AddContext(err, "unable to create wallet") } // Load manager. fmt.Println("Loading manager...") - m, errChanM := manager.New(db, ms, cs, g, tp, w, d, config.Name) + m, errChanM := manager.New(db, ms, cm, s, w, d, config.Name) if err := modules.PeekErr(errChanM); err != nil { - errChan <- modules.AddContext(err, "unable to create manager") - return nil, errChan + return nil, modules.AddContext(err, "unable to create manager") } // Load provider. fmt.Println("Loading provider...") - p, errChanP := provider.New(db, g, m, config.SatelliteAddr, config.MuxAddr, d) + p, errChanP := provider.New(db, s, m, config.SatelliteAddr, config.MuxAddr, d) if err := modules.PeekErr(errChanP); err != nil { - errChan <- modules.AddContext(err, "unable to create provider") - return nil, errChan + return nil, modules.AddContext(err, "unable to create provider") } // Load portal. fmt.Println("Loading portal...") - pt, err := portal.New(config, db, ms, cs, w, m, p, d) + pt, err := portal.New(config, db, ms, cm, w, m, p, d) if err != nil { - errChan <- modules.AddContext(err, "unable to create portal") - return nil, errChan + return nil, modules.AddContext(err, "unable to create portal") } // Setup complete. 
fmt.Printf("API is now available, synchronous startup completed in %.3f seconds\n", time.Since(loadStartTime).Seconds()) - go func() { - close(errChan) - }() - - return &Node{ - DB: db, - - ConsensusSet: cs, - Gateway: g, - Manager: m, - Portal: pt, - Provider: p, - TransactionPool: tp, - Wallet: w, - - Dir: d, - }, errChan + + n := &Node{ + db: db, + bdb: bdb, + + ChainManager: cm, + Syncer: s, + Manager: m, + Portal: pt, + Provider: p, + Wallet: w, + } + + n.Start = func() func() { + ch := make(chan struct{}) + go func() { + s.Run() + close(ch) + }() + return func() { + n.Close() + <-ch + } + } + + return n, nil } diff --git a/persist/config.go b/persist/config.go index 42e7a69..0457075 100644 --- a/persist/config.go +++ b/persist/config.go @@ -8,8 +8,6 @@ import ( "io" "os" "path/filepath" - - "github.com/mike76-dev/sia-satellite/modules" ) // configFilename is the name of the configuration file. @@ -18,13 +16,11 @@ const configFilename = "satdconfig.json" // SatdConfig contains the fields that are passed on to the new node. type SatdConfig struct { Name string `json:"name"` - UserAgent string `json:"agent"` GatewayAddr string `json:"gateway"` APIAddr string `json:"api"` SatelliteAddr string `json:"satellite"` MuxAddr string `json:"mux"` Dir string `json:"dir"` - Bootstrap bool `json:"bootstrap"` DBUser string `json:"dbUser"` DBName string `json:"dbName"` PortalPort string `json:"portal"` @@ -40,7 +36,17 @@ type satdMetadata = struct { // metadata contains the actual values. var metadata = satdMetadata{ Header: "Satd Configuration", - Version: "0.3.0", + Version: "0.4.0", +} + +func compose(err1, err2 error) error { + if err1 == nil { + return err2 + } + if err2 == nil { + return nil + } + return fmt.Errorf("%v: %v", err1, err2) } // Load loads the configuration from disk. 
@@ -76,7 +82,7 @@ func loadJSON(meta satdMetadata, object interface{}, filename string) error { return fmt.Errorf("unable to open persisted json object file: %s", err) } defer func() { - err = modules.ComposeErrors(err, file.Close()) + err = compose(err, file.Close()) }() // Read the metadata from the file. @@ -86,13 +92,13 @@ func loadJSON(meta satdMetadata, object interface{}, filename string) error { return fmt.Errorf("unable to read header from persisted json object file: %s", err) } if header != meta.Header { - return errors.New("Wrong config file header") + return errors.New("wrong config file header") } if err := dec.Decode(&version); err != nil { return fmt.Errorf("unable to read version from persisted json object file: %s", err) } if version != meta.Version { - return errors.New("Wrong config file version") + return errors.New("wrong config file version") } // Read everything else. @@ -139,7 +145,7 @@ func saveJSON(meta satdMetadata, object interface{}, filename string) error { return fmt.Errorf("unable to open file: %s", err) } defer func() { - err = modules.ComposeErrors(err, file.Close()) + err = compose(err, file.Close()) }() _, err = file.Write(data) if err != nil { diff --git a/persist/log.go b/persist/log.go index ec175a1..2103cea 100644 --- a/persist/log.go +++ b/persist/log.go @@ -1,56 +1,49 @@ package persist import ( - "io" - "github.com/mike76-dev/sia-satellite/internal/build" - "gitlab.com/NebulousLabs/log" -) - -// Logger is a wrapper for log.Logger. -type Logger struct { - *log.Logger -} - -var ( - // options contains log options with build-specific information. - options = log.Options{ - BinaryName: build.BinaryName, - BugReportURL: build.IssuesURL, - Debug: false, - Release: log.Release, - Version: build.NodeVersion, - } + "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) // printCommitHash logs build.GitRevision at startup. 
-func printCommitHash(logger *log.Logger) { +func printCommitHash(logger *zap.Logger) { if build.GitRevision != "" { - logger.Printf("STARTUP: Commit hash %v", build.GitRevision) + logger.Sugar().Infof("STARTUP: commit hash %v", build.GitRevision) } else { - logger.Println("STARTUP: Unknown commit hash") + logger.Sugar().Info("STARTUP: unknown commit hash") } } // NewFileLogger returns a logger that logs to logFilename. The file is opened // in append mode, and created if it does not exist. -func NewFileLogger(logFilename string) (*Logger, error) { - logger, err := log.NewFileLogger(logFilename, options) +func NewFileLogger(logFilename string) (*zap.Logger, func(), error) { + writer, closeFn, err := zap.Open(logFilename) if err != nil { - return nil, err + return nil, nil, err } - printCommitHash(logger) - return &Logger{logger}, nil -} -// NewLogger returns a logger that can be closed. Calls should not be made to -// the logger after 'Close' has been called. -func NewLogger(w io.Writer) (*Logger, error) { - logger, err := log.NewLogger(w, options) - if err != nil { - return nil, err - } + config := zap.NewProductionEncoderConfig() + config.EncodeTime = zapcore.RFC3339TimeEncoder + config.StacktraceKey = "" + fileEncoder := zapcore.NewJSONEncoder(config) + + core := zapcore.NewTee( + zapcore.NewCore(fileEncoder, writer, zapcore.DebugLevel), + ) + + logger := zap.New( + core, + zap.AddCaller(), + zap.AddStacktrace(zapcore.ErrorLevel), + ) + printCommitHash(logger) - return &Logger{logger}, nil + + return logger, func() { + logger.Sugar().Info("logging terminated") + logger.Sync() + closeFn() + }, nil } diff --git a/satc/consensuscmd.go b/satc/consensuscmd.go index 7c9432b..dbef26a 100644 --- a/satc/consensuscmd.go +++ b/satc/consensuscmd.go @@ -4,16 +4,16 @@ import ( "fmt" "time" - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/node/api" "github.com/spf13/cobra" + "go.sia.tech/core/consensus" + "go.sia.tech/coreutils/chain" ) 
var ( consensusCmd = &cobra.Command{ Use: "consensus", Short: "Print the current state of consensus", - Long: "Print the current state of consensus such as current block, block height, and target.", + Long: "Print the current state of consensus such as current block and block height.", Run: wrap(consensuscmd), } ) @@ -21,34 +21,26 @@ var ( // consensuscmd is the handler for the command `satc consensus`. // Prints the current state of consensus. func consensuscmd() { - cg, err := httpClient.ConsensusGet() - if modules.ContainsError(err, api.ErrAPICallNotRecognized) { - // Assume module is not loaded if status command is not recognized. - fmt.Printf("Consensus:\n Status: %s\n\n", moduleNotReadyStatus) - return - } else if err != nil { + tip, err := httpClient.ConsensusTip() + if err != nil { die("Could not get current consensus state:", err) } - if cg.Synced { + if tip.Synced { fmt.Printf(`Synced: %v Block: %v Height: %v -Target: %v -Difficulty: %v -`, yesNo(cg.Synced), cg.CurrentBlock, cg.Height, cg.Target, cg.Difficulty) +`, yesNo(tip.Synced), tip.BlockID, tip.Height) } else { - estimatedHeight := (time.Now().Unix() - modules.GenesisTimestamp.Unix()) / modules.BlockFrequency - estimatedProgress := float64(cg.Height) / float64(estimatedHeight) * 100 + _, genesisBlock := chain.Mainnet() + estimatedHeight := (time.Now().Unix() - genesisBlock.Timestamp.Unix()) / int64(consensus.State{}.BlockInterval().Seconds()) + estimatedProgress := float64(tip.Height) / float64(estimatedHeight) * 100 if estimatedProgress > 100 { estimatedProgress = 99.9 } - if estimatedProgress == 100 && !cg.Synced { - estimatedProgress = 99.9 - } fmt.Printf(`Synced: %v Height: %v Progress (estimated): %.1f%% -`, yesNo(cg.Synced), cg.Height, estimatedProgress) +`, yesNo(tip.Synced), tip.Height, estimatedProgress) } } diff --git a/satc/consts.go b/satc/consts.go deleted file mode 100644 index 0159d9c..0000000 --- a/satc/consts.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -const ( - // 
moduleNotReadyStatus is the error message displayed when an API call error - // suggests that a modules is not yet ready for usage. - moduleNotReadyStatus = "Module not loaded or still starting up" -) diff --git a/satc/daemoncmd.go b/satc/daemoncmd.go index 18ca879..811d8a2 100644 --- a/satc/daemoncmd.go +++ b/satc/daemoncmd.go @@ -4,25 +4,10 @@ import ( "fmt" "github.com/mike76-dev/sia-satellite/internal/build" - "github.com/mike76-dev/sia-satellite/modules" "github.com/spf13/cobra" ) var ( - alertsCmd = &cobra.Command{ - Use: "alerts", - Short: "view daemon alerts", - Long: "view daemon alerts", - Run: wrap(alertscmd), - } - - stopCmd = &cobra.Command{ - Use: "stop", - Short: "Stop the Sia daemon", - Long: "Stop the Sia daemon.", - Run: wrap(stopcmd), - } - versionCmd = &cobra.Command{ Use: "version", Short: "Print version information", @@ -31,55 +16,6 @@ var ( } ) -// alertscmd prints the alerts from the daemon. This will not print critical -// alerts as critical alerts are printed on every satc command. -func alertscmd() { - const maxAlerts = 1000 - - al, err := httpClient.DaemonAlertsGet() - if err != nil { - fmt.Println("Could not get daemon alerts:", err) - return - } - if len(al.Alerts) == 0 { - fmt.Println("There are no alerts registered.") - return - } - if len(al.Alerts) == len(al.CriticalAlerts) { - // Return since critical alerts are already displayed. 
- return - } - - remaining := maxAlerts - for sev := modules.AlertSeverity(modules.SeverityError); sev >= modules.SeverityInfo; sev-- { - if remaining <= 0 { - return - } - - var alerts []modules.Alert - switch sev { - case modules.SeverityError: - alerts = al.ErrorAlerts - case modules.SeverityWarning: - alerts = al.WarningAlerts - case modules.SeverityInfo: - alerts = al.InfoAlerts - } - - n := len(alerts) - if n > remaining { - n = remaining - } - - remaining -= n - printAlerts(alerts[:n], sev) - } - - if len(al.Alerts) > maxAlerts { - fmt.Printf("Only %v/%v alerts are displayed.\n", maxAlerts, len(al.Alerts)) - } -} - // version prints the version of satc and satd. func versioncmd() { fmt.Println("Satellite Client") @@ -88,7 +24,7 @@ func versioncmd() { fmt.Println("\tGit Revision " + build.GitRevision) fmt.Println("\tBuild Time " + build.BuildTime) } - dvg, err := httpClient.DaemonVersionGet() + dvg, err := httpClient.DaemonVersion() if err != nil { fmt.Println("Could not get daemon version:", err) return @@ -100,28 +36,3 @@ func versioncmd() { fmt.Println("\tBuild Time " + dvg.BuildTime) } } - -// stopcmd is the handler for the command `satc stop`. -// Stops the daemon. 
-func stopcmd() { - err := httpClient.DaemonStopGet() - if err != nil { - die("Could not stop daemon:", err) - } - fmt.Println("Satellite daemon stopped.") -} - -// printAlerts is a helper function to print details of a slice of alerts -// with given severity description to command line -func printAlerts(alerts []modules.Alert, as modules.AlertSeverity) { - fmt.Printf("\n There are %v %s alerts\n", len(alerts), as.String()) - for _, a := range alerts { - fmt.Printf(` ------------------- - Module: %s - Severity: %s - Message: %s - Cause: %s`, a.Module, a.Severity.String(), a.Msg, a.Cause) - } - fmt.Printf("\n------------------\n\n") -} diff --git a/satc/gatewaycmd.go b/satc/gatewaycmd.go deleted file mode 100644 index 18713d0..0000000 --- a/satc/gatewaycmd.go +++ /dev/null @@ -1,224 +0,0 @@ -package main - -import ( - "fmt" - "os" - "text/tabwriter" - - "github.com/mike76-dev/sia-satellite/modules" - "github.com/spf13/cobra" -) - -var ( - gatewayAddressCmd = &cobra.Command{ - Use: "address", - Short: "Print the gateway address", - Long: "Print the network address of the gateway.", - Run: wrap(gatewayaddresscmd), - } - - gatewayCmd = &cobra.Command{ - Use: "gateway", - Short: "Perform gateway actions", - Long: "View and manage the gateway's connected peers.", - Run: wrap(gatewaycmd), - } - - gatewayBlocklistCmd = &cobra.Command{ - Use: "blocklist", - Short: "View and manage the gateway's blocklisted peers", - Long: "Display and manage the peers currently on the gateway blocklist.", - Run: wrap(gatewayblocklistcmd), - } - - gatewayBlocklistAppendCmd = &cobra.Command{ - Use: "append [ip] [ip] [ip] [ip]...", - Short: "Adds new ip address(es) to the gateway blocklist.", - Long: `Adds new ip address(es) to the gateway blocklist. -Accepts a list of ip addresses or domain names as individual inputs. 
-For example: satc gateway blocklist append 123.123.123.123 111.222.111.222 mysiahost.duckdns.org`, - Run: gatewayblocklistappendcmd, - } - - gatewayBlocklistClearCmd = &cobra.Command{ - Use: "clear", - Short: "Clear the blocklisted peers list", - Long: `Clear the blocklisted peers list. - For example: satc gateway blocklist clear`, - Run: gatewayblocklistclearcmd, - } - - gatewayBlocklistRemoveCmd = &cobra.Command{ - Use: "remove [ip] [ip] [ip] [ip]...", - Short: "Remove ip address(es) from the gateway blocklist.", - Long: `Remove ip address(es) from the gateway blocklist. -Accepts a list of ip addresses or domain names as individual inputs. -For example: satc gateway blocklist remove 123.123.123.123 111.222.111.222 mysiahost.duckdns.org`, - Run: gatewayblocklistremovecmd, - } - - gatewayBlocklistSetCmd = &cobra.Command{ - Use: "set [ip] [ip] [ip] [ip]...", - Short: "Set the gateway's blocklist", - Long: `Set the gateway's blocklist. -Accepts a list of ip addresses or domain names as individual inputs. -For example: satc gateway blocklist set 123.123.123.123 111.222.111.222 mysiahost.duckdns.org`, - Run: gatewayblocklistsetcmd, - } - - gatewayConnectCmd = &cobra.Command{ - Use: "connect [address]", - Short: "Connect to a peer", - Long: "Connect to a peer and add it to the node list.", - Run: wrap(gatewayconnectcmd), - } - - gatewayDisconnectCmd = &cobra.Command{ - Use: "disconnect [address]", - Short: "Disconnect from a peer", - Long: "Disconnect from a peer. Does not remove the peer from the node list.", - Run: wrap(gatewaydisconnectcmd), - } - - gatewayListCmd = &cobra.Command{ - Use: "list", - Short: "View a list of peers", - Long: "View the current peer list.", - Run: wrap(gatewaylistcmd), - } -) - -// gatewayconnectcmd is the handler for the command `satc gateway add [address]`. -// Adds a new peer to the peer list. 
-func gatewayconnectcmd(addr string) { - err := httpClient.GatewayConnectPost(modules.NetAddress(addr)) - if err != nil { - die("Could not add peer:", err) - } - fmt.Println("Added", addr, "to peer list.") -} - -// gatewaydisconnectcmd is the handler for the command `satc gateway remove [address]`. -// Removes a peer from the peer list. -func gatewaydisconnectcmd(addr string) { - err := httpClient.GatewayDisconnectPost(modules.NetAddress(addr)) - if err != nil { - die("Could not remove peer:", err) - } - fmt.Println("Removed", addr, "from peer list.") -} - -// gatewayaddresscmd is the handler for the command `satc gateway address`. -// Prints the gateway's network address. -func gatewayaddresscmd() { - info, err := httpClient.GatewayGet() - if err != nil { - die("Could not get gateway address:", err) - } - fmt.Println("Address:", info.NetAddress) -} - -// gatewaycmd is the handler for the command `satc gateway`. -// Prints the gateway's network address and number of peers. -func gatewaycmd() { - info, err := httpClient.GatewayGet() - if err != nil { - die("Could not get gateway address:", err) - } - fmt.Println("Address:", info.NetAddress) - fmt.Println("Active peers:", len(info.Peers)) -} - -// gatewayblocklistcmd is the handler for the command `satc gateway blocklist` -// Prints the ip addresses on the gateway blocklist. -func gatewayblocklistcmd() { - gbg, err := httpClient.GatewayBlocklistGet() - if err != nil { - die("Could not get gateway blocklist", err) - } - fmt.Println(len(gbg.Blocklist), "ip addresses currently on the gateway blocklist") - for _, ip := range gbg.Blocklist { - fmt.Println(ip) - } -} - -// gatewayblocklistappendcmd is the handler for the command -// `satc gateway blocklist append`. -// Adds one or more new ip addresses to the gateway's blocklist. 
-func gatewayblocklistappendcmd(cmd *cobra.Command, addresses []string) { - if len(addresses) == 0 { - fmt.Println("No IP addresses submitted to append") - _ = cmd.UsageFunc()(cmd) - os.Exit(exitCodeUsage) - } - err := httpClient.GatewayAppendBlocklistPost(addresses) - if err != nil { - die("Could not append the ip addresses(es) to the gateway blocklist", err) - } - fmt.Println(addresses, "successfully added to the gateway blocklist") -} - -// gatewayblocklistclearcmd is the handler for the command -// `satc gateway blocklist clear`. -// Clears the gateway blocklist. -func gatewayblocklistclearcmd(cmd *cobra.Command, addresses []string) { - err := httpClient.GatewaySetBlocklistPost(addresses) - if err != nil { - die("Could not clear the gateway blocklist", err) - } - fmt.Println("successfully cleared the gateway blocklist") -} - -// gatewayblocklistremovecmd is the handler for the command -// `satc gateway blocklist remove`. -// Removes one or more ip addresses from the gateway's blocklist. -func gatewayblocklistremovecmd(cmd *cobra.Command, addresses []string) { - if len(addresses) == 0 { - fmt.Println("No IP addresses submitted to remove") - _ = cmd.UsageFunc()(cmd) - os.Exit(exitCodeUsage) - } - err := httpClient.GatewayRemoveBlocklistPost(addresses) - if err != nil { - die("Could not remove the ip address(es) from the gateway blocklist", err) - } - fmt.Println(addresses, "was successfully removed from the gateway blocklist") -} - -// gatewayblocklistsetcmd is the handler for the command -// `satc gateway blocklist set`. -// Sets the gateway blocklist to the ip addresses passed in. 
-func gatewayblocklistsetcmd(cmd *cobra.Command, addresses []string) { - if len(addresses) == 0 { - fmt.Println("No IP addresses submitted") - _ = cmd.UsageFunc()(cmd) - os.Exit(exitCodeUsage) - } - err := httpClient.GatewaySetBlocklistPost(addresses) - if err != nil { - die("Could not set the gateway blocklist", err) - } - fmt.Println(addresses, "was successfully set as the gateway blocklist") -} - -// gatewaylistcmd is the handler for the command `satc gateway list`. -// Prints a list of all peers. -func gatewaylistcmd() { - info, err := httpClient.GatewayGet() - if err != nil { - die("Could not get peer list:", err) - } - if len(info.Peers) == 0 { - fmt.Println("No peers to show.") - return - } - fmt.Println(len(info.Peers), "active peers:") - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - fmt.Fprintln(w, "Version\tOutbound\tAddress") - for _, peer := range info.Peers { - fmt.Fprintf(w, "%v\t%v\t%v\n", peer.Version, yesNo(!peer.Inbound), peer.NetAddress) - } - if err := w.Flush(); err != nil { - die("failed to flush writer") - } -} diff --git a/satc/hostdbcmd.go b/satc/hostdbcmd.go index d5977dc..646fa04 100644 --- a/satc/hostdbcmd.go +++ b/satc/hostdbcmd.go @@ -18,7 +18,6 @@ const scanHistoryLen = 30 var ( hostdbNumHosts int - hostdbVerbose bool ) var ( @@ -55,7 +54,7 @@ var ( ) // printScoreBreakdown prints the score breakdown of a host, provided the info. -func printScoreBreakdown(info *api.HostdbHostsGET) { +func printScoreBreakdown(info *api.HostdbHostGET) { fmt.Println("\n Score Breakdown:") w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) fmt.Fprintf(w, "\t\tAge:\t %.3f\n", info.ScoreBreakdown.Age) @@ -75,12 +74,8 @@ func printScoreBreakdown(info *api.HostdbHostsGET) { // Lists hosts known to the hostdb. func hostdbcmd() { if !verbose { - info, err := httpClient.HostDbActiveGet() - if modules.ContainsError(err, api.ErrAPICallNotRecognized) { - // Assume module is not loaded if status command is not recognized. 
- fmt.Printf("HostDB:\n Status: %s\n\n", moduleNotReadyStatus) - return - } else if err != nil { + info, err := httpClient.HostDbActiveHosts() + if err != nil { die("Could not fetch host list:", err) } @@ -91,7 +86,7 @@ func hostdbcmd() { // Strip down to the number of requested hosts. if hostdbNumHosts != 0 && hostdbNumHosts < len(info.Hosts) { - info.Hosts = info.Hosts[len(info.Hosts) - hostdbNumHosts:] + info.Hosts = info.Hosts[len(info.Hosts)-hostdbNumHosts:] } fmt.Println(len(info.Hosts), "Active Hosts:") @@ -99,13 +94,13 @@ func hostdbcmd() { fmt.Fprintln(w, "\t\tAddress\tVersion\tPrice (per TB per Mo)") for i, host := range info.Hosts { price := host.Settings.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte) - fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\n", len(info.Hosts) - i, host.Settings.NetAddress, host.Settings.Version, price) + fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\n", len(info.Hosts)-i, host.Settings.NetAddress, host.Settings.Version, price) } if err := w.Flush(); err != nil { die("failed to flush writer") } } else { - info, err := httpClient.HostDbAllGet() + info, err := httpClient.HostDbAllHosts() if err != nil { die("Could not fetch host list:", err) } @@ -117,11 +112,11 @@ func hostdbcmd() { // Iterate through the hosts and divide by category. 
var activeHosts, inactiveHosts, offlineHosts []api.ExtendedHostDBEntry for _, host := range info.Hosts { - if host.Settings.AcceptingContracts && len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory) - 1].Success { + if host.Settings.AcceptingContracts && len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory)-1].Success { activeHosts = append(activeHosts, host) continue } - if len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory) - 1].Success { + if len(host.ScanHistory) > 0 && host.ScanHistory[len(host.ScanHistory)-1].Success { inactiveHosts = append(inactiveHosts, host) continue } @@ -129,13 +124,13 @@ func hostdbcmd() { } if hostdbNumHosts > 0 && len(offlineHosts) > hostdbNumHosts { - offlineHosts = offlineHosts[len(offlineHosts) - hostdbNumHosts:] + offlineHosts = offlineHosts[len(offlineHosts)-hostdbNumHosts:] } if hostdbNumHosts > 0 && len(inactiveHosts) > hostdbNumHosts { - inactiveHosts = inactiveHosts[len(inactiveHosts) - hostdbNumHosts:] + inactiveHosts = inactiveHosts[len(inactiveHosts)-hostdbNumHosts:] } if hostdbNumHosts > 0 && len(activeHosts) > hostdbNumHosts { - activeHosts = activeHosts[len(activeHosts) - hostdbNumHosts:] + activeHosts = activeHosts[len(activeHosts)-hostdbNumHosts:] } fmt.Println() @@ -160,14 +155,14 @@ func hostdbcmd() { recentTime = scan.Timestamp recentSuccess = scan.Success } - uptimeRatio = float64(uptime) / float64(uptime + downtime) + uptimeRatio = float64(uptime) / float64(uptime+downtime) } // Get the scan history string. scanHistStr := "" displayScans := host.ScanHistory if len(host.ScanHistory) > scanHistoryLen { - displayScans = host.ScanHistory[len(host.ScanHistory) - scanHistoryLen:] + displayScans = host.ScanHistory[len(host.ScanHistory)-scanHistoryLen:] } for _, scan := range displayScans { if scan.Success { @@ -181,7 +176,7 @@ func hostdbcmd() { // recent scans. 
price := host.Settings.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte) downloadBWPrice := host.Settings.StoragePrice.Mul64(modules.BytesPerTerabyte) - fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%v\t%v\t%v\t%.3f\t%s\n", len(offlineHosts) - i, host.PublicKeyString, host.Settings.NetAddress, host.Settings.Version, modules.FilesizeUnits(host.Settings.RemainingStorage), price, downloadBWPrice, uptimeRatio, scanHistStr) + fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%v\t%v\t%v\t%.3f\t%s\n", len(offlineHosts)-i, host.PublicKeyString, host.Settings.NetAddress, host.Settings.Version, modules.FilesizeUnits(host.Settings.RemainingStorage), price, downloadBWPrice, uptimeRatio, scanHistStr) } if err := w.Flush(); err != nil { die("failed to flush writer") @@ -209,7 +204,7 @@ func hostdbcmd() { recentTime = scan.Timestamp recentSuccess = scan.Success } - uptimeRatio = float64(uptime) / float64(uptime + downtime) + uptimeRatio = float64(uptime) / float64(uptime+downtime) } // Get a string representation of the historic outcomes of the most @@ -217,7 +212,7 @@ func hostdbcmd() { scanHistStr := "" displayScans := host.ScanHistory if len(host.ScanHistory) > scanHistoryLen { - displayScans = host.ScanHistory[len(host.ScanHistory) - scanHistoryLen:] + displayScans = host.ScanHistory[len(host.ScanHistory)-scanHistoryLen:] } for _, scan := range displayScans { if scan.Success { @@ -230,7 +225,7 @@ func hostdbcmd() { price := host.Settings.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte) collateral := host.Settings.Collateral.Mul(modules.BlockBytesPerMonthTerabyte) downloadBWPrice := host.Settings.DownloadBandwidthPrice.Mul64(modules.BytesPerTerabyte) - fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%.3f\t%s\n", len(inactiveHosts) - i, host.PublicKeyString, host.Settings.NetAddress, host.Settings.Version, modules.FilesizeUnits(host.Settings.RemainingStorage), price, collateral, downloadBWPrice, uptimeRatio, scanHistStr) + fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%.3f\t%s\n", 
len(inactiveHosts)-i, host.PublicKeyString, host.Settings.NetAddress, host.Settings.Version, modules.FilesizeUnits(host.Settings.RemainingStorage), price, collateral, downloadBWPrice, uptimeRatio, scanHistStr) } fmt.Fprintln(w, "\t\tPubkey\tAddress\tVersion\tRemaining Storage\tPrice (/ TB / Month)\tCollateral (/ TB / Month)\tDownload Price (/ TB)\tUptime\tRecent Scans") if err := w.Flush(); err != nil { @@ -243,7 +238,7 @@ func hostdbcmd() { referenceScore := big.NewRat(1, 1) if len(activeHosts) > 0 { referenceIndex := len(activeHosts) * 3 / 5 - hostInfo, err := httpClient.HostDbHostsGet(activeHosts[referenceIndex].PublicKey) + hostInfo, err := httpClient.HostDbHost(activeHosts[referenceIndex].PublicKey) if err != nil { die("Could not fetch provided host:", err) } @@ -274,7 +269,7 @@ func hostdbcmd() { recentTime = scan.Timestamp recentSuccess = scan.Success } - uptimeRatio = float64(uptime) / float64(uptime + downtime) + uptimeRatio = float64(uptime) / float64(uptime+downtime) } // Get a string representation of the historic outcomes of the most @@ -282,7 +277,7 @@ func hostdbcmd() { scanHistStr := "" displayScans := host.ScanHistory if len(host.ScanHistory) > scanHistoryLen { - displayScans = host.ScanHistory[len(host.ScanHistory) - scanHistoryLen:] + displayScans = host.ScanHistory[len(host.ScanHistory)-scanHistoryLen:] } for _, scan := range displayScans { if scan.Success { @@ -293,7 +288,7 @@ func hostdbcmd() { } // Grab the score information for the active hosts. 
- hostInfo, err := httpClient.HostDbHostsGet(host.PublicKey) + hostInfo, err := httpClient.HostDbHost(host.PublicKey) if err != nil { die("Could not fetch provided host:", err) } @@ -302,7 +297,7 @@ func hostdbcmd() { price := host.Settings.StoragePrice.Mul(modules.BlockBytesPerMonthTerabyte) collateral := host.Settings.Collateral.Mul(modules.BlockBytesPerMonthTerabyte) downloadBWPrice := host.Settings.DownloadBandwidthPrice.Mul64(modules.BytesPerTerabyte) - fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%12.6g\t%v\t%v\t%v\t%v\t%v\t%.3f\t%s\n", len(activeHosts) - i, host.PublicKeyString, host.Settings.NetAddress, host.Settings.Version, score, modules.FilesizeUnits(host.Settings.RemainingStorage), host.Settings.ContractPrice, price, collateral, downloadBWPrice, uptimeRatio, scanHistStr) + fmt.Fprintf(w, "\t%v:\t%v\t%v\t%v\t%12.6g\t%v\t%v\t%v\t%v\t%v\t%.3f\t%s\n", len(activeHosts)-i, host.PublicKeyString, host.Settings.NetAddress, host.Settings.Version, score, modules.FilesizeUnits(host.Settings.RemainingStorage), host.Settings.ContractPrice, price, collateral, downloadBWPrice, uptimeRatio, scanHistStr) } fmt.Fprintln(w, "\t\tPubkey\tAddress\tVersion\tScore\tRemaining Storage\tContract Fee\tPrice (/ TB / Month)\tCollateral (/ TB / Month)\tDownload Price (/TB)\tUptime\tRecent Scans") if err := w.Flush(); err != nil { @@ -314,7 +309,7 @@ func hostdbcmd() { // hostdbfiltermodecmd is the handler for the command `satc hostdb // filtermode`. 
func hostdbfiltermodecmd() { - hdfmg, err := httpClient.HostDbFilterModeGet() + hdfmg, err := httpClient.HostDbFilterMode() if err != nil { die(err) } @@ -365,7 +360,7 @@ func hostdbsetfiltermodecmd(cmd *cobra.Command, args []string) { die() } - err = httpClient.HostDbFilterModePost(fm, hosts, netAddresses) + err = httpClient.HostDbSetFilterMode(fm, hosts, netAddresses) if err != nil { fmt.Println("Could not set hostdb filtermode: ", err) die() @@ -380,7 +375,7 @@ func hostdbviewcmd(pubkey string) { if err := publicKey.UnmarshalText([]byte(pubkey)); err != nil { die("Could not unmarshal public key:", err) } - info, err := httpClient.HostDbHostsGet(publicKey) + info, err := httpClient.HostDbHost(publicKey) if err != nil { die("Could not fetch provided host:", err) } @@ -440,7 +435,7 @@ func hostdbviewcmd(pubkey string) { recentTime = scan.Timestamp recentSuccess = scan.Success } - uptimeRatio = float64(uptime) / float64(uptime + downtime) + uptimeRatio = float64(uptime) / float64(uptime+downtime) } // Compute the uptime ratio, but shift by 0.02 to acknowledge fully that diff --git a/satc/main.go b/satc/main.go index 87a8955..b6f7200 100644 --- a/satc/main.go +++ b/satc/main.go @@ -2,38 +2,22 @@ package main import ( "fmt" - "math" "os" "reflect" "github.com/mike76-dev/sia-satellite/internal/build" - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/node/api" "github.com/mike76-dev/sia-satellite/node/api/client" "github.com/spf13/cobra" ) var ( - // General Flags. - alertSuppress bool - verbose bool // Display additional information. - - // Module Specific Flags. - // - // Wallet Flags. - initForce bool // Destroy and re-encrypt the wallet on init if it already exists. - initPassword bool // Supply a custom password when creating a wallet. - walletRawTxn bool // Encode/decode transactions in base64-encoded binary. - walletStartHeight uint64 // Start height for transaction search. 
- walletEndHeight uint64 // End height for transaction search. - walletTxnFeeIncluded bool // Include the fee in the balance being sent. - insecureInput bool // Insecure password/seed input. Disables the shoulder-surfing and Mac secure input feature. + verbose bool // Display additional information. ) var ( // Globals. rootCmd *cobra.Command // Root command cobra object, used by bash completion cmd. - httpClient client.Client + httpClient *client.Client ) // Exit codes. @@ -84,51 +68,48 @@ func statuscmd() { defer fmt.Println() // Consensus Info. - cg, err := httpClient.ConsensusGet() - if modules.ContainsError(err, api.ErrAPICallNotRecognized) { - // Assume module is not loaded if status command is not recognized. - fmt.Printf("Consensus:\n Status: %s\n\n", moduleNotReadyStatus) - } else if err != nil { + cg, err := httpClient.ConsensusTip() + if err != nil { die("Could not get consensus status:", err) - } else { - fmt.Printf(`Consensus: + } + fmt.Printf(`Consensus: Synced: %v Height: %v `, yesNo(cg.Synced), cg.Height) + + // TxPool Info. + fee, err := httpClient.TxpoolFee() + if err != nil { + die("Could not get mining fee:", err) } + fmt.Printf(`Recommended fee: + %v / KB +`, fee.Mul64(1e3)) // Wallet Info. - walletStatus, err := httpClient.WalletGet() - if modules.ContainsError(err, api.ErrAPICallNotRecognized) { - // Assume module is not loaded if status command is not recognized. - fmt.Printf("Wallet:\n Status: %s\n\n", moduleNotReadyStatus) - } else if err != nil { + walletStatus, err := httpClient.WalletBalance() + if err != nil { die("Could not get wallet status:", err) - } else if walletStatus.Unlocked { - fmt.Printf(`Wallet: - Status: unlocked - Siacoin Balance: %v -`, walletStatus.ConfirmedSiacoinBalance) - } else { - fmt.Printf(`Wallet: - Status: Locked -`) } + fmt.Printf(`Wallet: + Siacoin Balance: %v + Siafund Balance: %v + `, walletStatus.Siacoins, walletStatus.Siafunds) // Manager Info. 
- renters, err := httpClient.ManagerRentersGet() - if err != nil { - die(err) - } - contracts, err := httpClient.ManagerContractsGet("") - if err != nil { - die(err) - } + /*renters, err := httpClient.ManagerRentersGet() + if err != nil { + die(err) + } + contracts, err := httpClient.ManagerContractsGet("") + if err != nil { + die(err) + } - fmt.Printf(`Manager: - Renters: %v - Active Contracts: %v -`, len(renters.Renters), len(contracts.ActiveContracts)) + fmt.Printf(`Manager: + Renters: %v + Active Contracts: %v + `, len(renters.Renters), len(contracts.ActiveContracts))*/ } func main() { @@ -136,21 +117,8 @@ func main() { rootCmd = initCmds() // Initialize client. - initClient(rootCmd, &verbose, &httpClient, &alertSuppress) - - // Perform some basic actions after cobra has initialized. - cobra.OnInitialize(func() { - // Set API password if it was not set. - setAPIPasswordIfNotSet() - - // Check for Critical Alerts. - alerts, err := httpClient.DaemonAlertsGet() - if err == nil && len(alerts.CriticalAlerts) > 0 && !alertSuppress { - printAlerts(alerts.CriticalAlerts, modules.SeverityCritical) - fmt.Println("------------------") - fmt.Printf("\n The above %v critical alerts should be resolved ASAP\n\n", len(alerts.CriticalAlerts)) - } - }) + httpClient = client.NewClient() + initClient(rootCmd) // Run. if err := rootCmd.Execute(); err != nil { @@ -172,15 +140,11 @@ func initCmds() *cobra.Command { } // Daemon Commands. - root.AddCommand(alertsCmd, stopCmd, versionCmd) + root.AddCommand(versionCmd) // Create command tree (alphabetized by root command). 
root.AddCommand(consensusCmd) - root.AddCommand(gatewayCmd) - gatewayCmd.AddCommand(gatewayAddressCmd, gatewayBlocklistCmd, gatewayConnectCmd, gatewayDisconnectCmd, gatewayListCmd) - gatewayBlocklistCmd.AddCommand(gatewayBlocklistAppendCmd, gatewayBlocklistClearCmd, gatewayBlocklistRemoveCmd, gatewayBlocklistSetCmd) - root.AddCommand(hostdbCmd) hostdbCmd.AddCommand(hostdbFiltermodeCmd, hostdbSetFiltermodeCmd, hostdbViewCmd) hostdbCmd.Flags().IntVarP(&hostdbNumHosts, "numhosts", "n", 0, "Number of hosts to display from the hostdb") @@ -203,46 +167,19 @@ func initCmds() *cobra.Command { portalAnnouncementCmd.AddCommand(portalAnnouncementSetCmd) portalAnnouncementCmd.AddCommand(portalAnnouncementRemoveCmd) + root.AddCommand(syncerCmd) + syncerCmd.AddCommand(syncerConnectCmd, syncerPeersCmd) + root.AddCommand(walletCmd) - walletCmd.AddCommand(walletAddressCmd, walletAddressesCmd, walletBalanceCmd, walletBroadcastCmd, walletChangePasswordCmd, walletInitCmd, walletInitSeedCmd, walletLoadCmd, walletLockCmd, walletSeedsCmd, walletSendCmd, walletSignCmd, walletSweepCmd, walletTransactionsCmd, walletUnlockCmd) - walletInitCmd.Flags().BoolVarP(&initPassword, "password", "p", false, "Prompt for a custom password") - walletInitCmd.Flags().BoolVarP(&initForce, "force", "", false, "destroy the existing wallet and re-encrypt") - walletInitCmd.Flags().BoolVarP(&insecureInput, "insecure-input", "", false, "Disable shoulder-surf protection (echoing passwords and seeds)") - walletInitSeedCmd.Flags().BoolVarP(&initForce, "force", "", false, "destroy the existing wallet") - walletInitSeedCmd.Flags().BoolVarP(&insecureInput, "insecure-input", "", false, "Disable shoulder-surf protection (echoing passwords and seeds)") - walletLoadCmd.AddCommand(walletLoadSeedCmd) + walletCmd.AddCommand(walletAddressCmd, walletAddressesCmd, walletBalanceCmd, walletSendCmd) walletSendCmd.AddCommand(walletSendSiacoinsCmd) - walletSendSiacoinsCmd.Flags().BoolVarP(&walletTxnFeeIncluded, "fee-included", "", 
false, "Take the transaction fee out of the balance being submitted instead of the fee being additional") - walletChangePasswordCmd.Flags().BoolVarP(&insecureInput, "insecure-input", "", false, "Disable shoulder-surf protection (echoing passwords and seeds)") - walletUnlockCmd.Flags().BoolVarP(&insecureInput, "insecure-input", "", false, "Disable shoulder-surf protection (echoing passwords and seeds)") - walletUnlockCmd.Flags().BoolVarP(&initPassword, "password", "p", false, "Display interactive password prompt even if SATD_WALLET_PASSWORD is set") - walletBroadcastCmd.Flags().BoolVarP(&walletRawTxn, "raw", "", false, "Decode transaction as base64 instead of JSON") - walletSignCmd.Flags().BoolVarP(&walletRawTxn, "raw", "", false, "Encode signed transaction as base64 instead of JSON") - walletTransactionsCmd.Flags().Uint64Var(&walletStartHeight, "startheight", 0, " Height of the block where transaction history should begin.") - walletTransactionsCmd.Flags().Uint64Var(&walletEndHeight, "endheight", math.MaxUint64, " Height of the block where transaction history should end.") return root } // initClient initializes client cmd flags and default values. -func initClient(root *cobra.Command, verbose *bool, client *client.Client, alertSuppress *bool) { - root.PersistentFlags().BoolVarP(verbose, "verbose", "v", false, "Display additional information") - root.PersistentFlags().StringVarP(&client.Address, "addr", "a", "localhost:9990", "which host/port to communicate with (i.e. the host/port satd is listening on)") - root.PersistentFlags().StringVarP(&client.Password, "apipassword", "", "", "the password for the API's http authentication") - root.PersistentFlags().StringVarP(&client.UserAgent, "useragent", "", "Sat-Agent", "the useragent used by satc to connect to the daemon's API") - root.PersistentFlags().BoolVarP(alertSuppress, "alert-suppress", "s", false, "suppress satc alerts") -} - -// setAPIPasswordIfNotSet sets API password if it was not set. 
-func setAPIPasswordIfNotSet() { - // Check if the API Password is set. - if httpClient.Password == "" { - // No password passed in, fetch the API Password. - pwd := os.Getenv("SATD_API_PASSWORD") - if pwd == "" { - fmt.Println("Exiting: Error getting API Password") - os.Exit(exitCodeGeneral) - } - httpClient.Password = pwd - } +func initClient(root *cobra.Command) { + root.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Display additional information") + root.PersistentFlags().StringVarP(&httpClient.Client().BaseURL, "addr", "a", "http://localhost:9990/api", "which host/port to communicate with (i.e. the host/port satd is listening on)") + root.PersistentFlags().StringVarP(&httpClient.Client().Password, "apipassword", "", "", "the password for the API's http authentication") } diff --git a/satc/managercmd.go b/satc/managercmd.go index 2b66f98..7131da2 100644 --- a/satc/managercmd.go +++ b/satc/managercmd.go @@ -29,14 +29,14 @@ var ( } managerBalanceCmd = &cobra.Command{ - Use: "balance [public_key}", + Use: "balance [public_key]", Short: "Print the renter balance", Long: "Print the balance of the renter with the given public key.", Run: wrap(managerbalancecmd), } managerContractsCmd = &cobra.Command{ - Use: "contracts [public_key] or contracts all", + Use: "contracts [public_key] | contracts all", Short: "Print the list of the contracts", Long: "Print the list of the contracts. A renter public key may be provided.", Run: wrap(managercontractscmd), @@ -118,7 +118,7 @@ For payment_plan, either 'prepayment' or 'invoicing' are accepted.`, // manageraveragescmd is the handler for the command `satc manager averages [currency]`. // Displays the host network averages in the given currency. 
func manageraveragescmd(currency string) { - ha, err := httpClient.ManagerAveragesGet(currency) + ha, err := httpClient.ManagerAverages(currency) if err != nil { die(err) } @@ -148,7 +148,7 @@ func manageraveragescmd(currency string) { // managerrenterscmd is the handler for the command `satc manager renters`. // Prints the list of the renters. func managerrenterscmd() { - renters, err := httpClient.ManagerRentersGet() + renters, err := httpClient.ManagerRenters() if err != nil { die(err) } @@ -166,7 +166,7 @@ func managerrenterscmd() { // managerrentercmd is the handler for the command `satc manager renter [public_key]`. // Prints the settings of the given renter. func managerrentercmd(key string) { - renter, err := httpClient.ManagerRenterGet(key) + renter, err := httpClient.ManagerRenter(key) if err != nil { die(err) } @@ -213,7 +213,7 @@ Auto Repair Files: %v // managerbalancecmd is the handler for the command `satc manager balance [public_key]`. // Prints the balance of the given renter. func managerbalancecmd(key string) { - ub, err := httpClient.ManagerBalanceGet(key) + ub, err := httpClient.ManagerBalance(key) if err != nil { die(err) } @@ -232,7 +232,7 @@ func managercontractscmd(key string) { if strings.ToLower(key) == "all" { key = "" } - contracts, err := httpClient.ManagerContractsGet(key) + contracts, err := httpClient.ManagerContracts(key) if err != nil { die(err) } @@ -413,15 +413,15 @@ Expired Refreshed: %v // managercmd is the handler for the command `satc manager`. // Prints the information about the manager. 
func managercmd() { - renters, err := httpClient.ManagerRentersGet() + renters, err := httpClient.ManagerRenters() if err != nil { die(err) } - contracts, err := httpClient.ManagerContractsGet("") + contracts, err := httpClient.ManagerContracts("") if err != nil { die(err) } - maintenance, err := httpClient.ManagerMaintenanceGet() + maintenance, err := httpClient.ManagerMaintenance() if err != nil { die(err) } @@ -435,7 +435,7 @@ Maintenance: %v // managerpreferencescmd is the handler for the command `satc manager preferences`. // Prints the email preferences. func managerpreferencescmd() { - ep, err := httpClient.ManagerPreferencesGet() + ep, err := httpClient.ManagerPreferences() if err != nil { die(err) } @@ -460,7 +460,7 @@ func managersetpreferencescmd(email, wt string) { die(err) } - err = httpClient.ManagerPreferencesPost(api.EmailPreferences{ + err = httpClient.ManagerUpdatePreferences(api.EmailPreferences{ Email: email, WarnThreshold: threshold, }) @@ -474,7 +474,7 @@ func managersetpreferencescmd(email, wt string) { // managerpricescmd is the handler for the command `satc manager prices`. // Prints the current prices. func managerpricescmd() { - prices, err := httpClient.ManagerPricesGet() + prices, err := httpClient.ManagerPrices() if err != nil { die(err) } @@ -518,7 +518,7 @@ Repair a Slab (in SC): // `satc manager setprices [type] [payment_plan] [value]`. // Changes the current prices. func managersetpricescmd(typ, plan, v string) { - prices, err := httpClient.ManagerPricesGet() + prices, err := httpClient.ManagerPrices() if err != nil { die(err) } @@ -573,7 +573,7 @@ func managersetpricescmd(typ, plan, v string) { die(errors.New("invalid price type")) } - err = httpClient.ManagerPricesPost(prices) + err = httpClient.ManagerUpdatePrices(prices) if err != nil { die(err) } @@ -584,7 +584,7 @@ func managersetpricescmd(typ, plan, v string) { // managermaintenancecmd is the handler for the command `satc manager maintenance`. 
// Prints the information about the satellite maintenance. func managermaintenancecmd() { - maintenance, err := httpClient.ManagerMaintenanceGet() + maintenance, err := httpClient.ManagerMaintenance() if err != nil { die(err) } @@ -596,7 +596,7 @@ func managermaintenancecmd() { // `satc manager maintenance start`. // Starts a satellite maintenance. func managermaintenancestartcmd() { - err := httpClient.ManagerMaintenancePost(true) + err := httpClient.ManagerSetMaintenance(true) if err != nil { die(err) } @@ -608,7 +608,7 @@ func managermaintenancestartcmd() { // `satc manager maintenance stop`. // Stops a running satellite maintenance. func managermaintenancestopcmd() { - err := httpClient.ManagerMaintenancePost(false) + err := httpClient.ManagerSetMaintenance(false) if err != nil { die(err) } diff --git a/satc/portalcmd.go b/satc/portalcmd.go index 9536126..e2e4e0c 100644 --- a/satc/portalcmd.go +++ b/satc/portalcmd.go @@ -59,11 +59,11 @@ Examples of [validity] include '0.5h', '1d', '2w', or 'noexpire' for a non-expir // portalcmd is the handler for the command `satc portal`. // Prints the portal information. func portalcmd() { - credits, err := httpClient.PortalCreditsGet() + credits, err := httpClient.PortalCredits() if err != nil { die(err) } - a, _, err := httpClient.PortalAnnouncementGet() + a, _, err := httpClient.PortalAnnouncement() if err != nil { die(err) } @@ -82,7 +82,7 @@ Remaining: %v credits // portalcreditscmd is the handler for the command `satc portal credits`. // Prints the credit information. 
func portalcreditscmd() { - credits, err := httpClient.PortalCreditsGet() + credits, err := httpClient.PortalCredits() if err != nil { die(err) } @@ -115,7 +115,7 @@ func portalcreditssetcmd(num, amt string) { Remaining: number, } - err = httpClient.PortalCreditsPost(credits) + err = httpClient.PortalSetCredits(credits) if err != nil { fmt.Println("Could not set credit information: ", err) die() @@ -126,7 +126,7 @@ func portalcreditssetcmd(num, amt string) { // portalannouncementcmd is the handler for the command `satc portal announcement`. // Prints the current portal announcement. func portalannouncementcmd() { - text, expires, err := httpClient.PortalAnnouncementGet() + text, expires, err := httpClient.PortalAnnouncement() if err != nil { die(err) } @@ -164,7 +164,7 @@ func portalannouncementsetcmd(path, validity string) { die(err) } - err = httpClient.PortalAnnouncementPost(string(b), expires) + err = httpClient.PortalSetAnnouncement(string(b), expires) if err != nil { die(err) } @@ -174,7 +174,7 @@ func portalannouncementsetcmd(path, validity string) { // portalannouncementremovecmd is the handler for the command `satc portal announcement // remove`. Clears the portal announcement. 
func portalannouncementremovecmd() { - err := httpClient.PortalAnnouncementPost("", 0) + err := httpClient.PortalSetAnnouncement("", 0) if err != nil { die(err) } diff --git a/satc/syncercmd.go b/satc/syncercmd.go new file mode 100644 index 0000000..aa5a846 --- /dev/null +++ b/satc/syncercmd.go @@ -0,0 +1,74 @@ +package main + +import ( + "fmt" + "os" + "text/tabwriter" + + "github.com/spf13/cobra" +) + +var ( + syncerCmd = &cobra.Command{ + Use: "syncer", + Short: "Perform syncer actions", + Long: "View and manage the syncer's connected peers.", + Run: wrap(syncercmd), + } + + syncerConnectCmd = &cobra.Command{ + Use: "connect [address]", + Short: "Connect to a peer", + Long: "Connect to a peer and add it to the node list.", + Run: wrap(syncerconnectcmd), + } + + syncerPeersCmd = &cobra.Command{ + Use: "peers", + Short: "View a list of peers", + Long: "View the current peer list.", + Run: wrap(syncerpeerscmd), + } +) + +// syncerconnectcmd is the handler for the command `satc syncer connect [address]`. +// Adds a new peer to the peer list. +func syncerconnectcmd(addr string) { + err := httpClient.SyncerConnect(addr) + if err != nil { + die("Could not add peer:", err) + } + fmt.Println("Added", addr, "to peer list.") +} + +// syncercmd is the handler for the command `satc syncer`. +// Prints the number of peers. +func syncercmd() { + peers, err := httpClient.SyncerPeers() + if err != nil { + die("Could not get syncer info:", err) + } + fmt.Println("Active peers:", len(peers)) +} + +// syncerpeerscmd is the handler for the command `satc syncer peers`. +// Prints a list of all peers. 
+func syncerpeerscmd() { + peers, err := httpClient.SyncerPeers() + if err != nil { + die("Could not get peer list:", err) + } + if len(peers) == 0 { + fmt.Println("No peers to show.") + return + } + fmt.Println(len(peers), "active peers:") + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + fmt.Fprintln(w, "Version\tOutbound\tAddress") + for _, peer := range peers { + fmt.Fprintf(w, "%v\t%v\t%v\n", peer.Version, yesNo(!peer.Inbound), peer.Address) + } + if err := w.Flush(); err != nil { + die("failed to flush writer") + } +} diff --git a/satc/walletcmd.go b/satc/walletcmd.go index 34adfa7..0e54461 100644 --- a/satc/walletcmd.go +++ b/satc/walletcmd.go @@ -1,35 +1,17 @@ package main import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/json" - "errors" "fmt" - "math" - "math/big" - "os" - "strconv" - "strings" - "syscall" - "time" - "github.com/mike76-dev/sia-satellite/modules" - "github.com/mike76-dev/sia-satellite/modules/wallet" - "github.com/mike76-dev/sia-satellite/node/api" "github.com/spf13/cobra" - "go.sia.tech/core/types" - - "golang.org/x/term" ) var ( walletAddressCmd = &cobra.Command{ Use: "address", Short: "Get a new wallet address", - Long: "Generate a new wallet address from the wallet's primary seed.", + Long: "Generate a new wallet address from the wallet's seed.", Run: wrap(walletaddresscmd), } @@ -47,21 +29,6 @@ var ( Run: wrap(walletbalancecmd), } - walletBroadcastCmd = &cobra.Command{ - Use: "broadcast [txn]", - Short: "Broadcast a transaction", - Long: `Broadcast a JSON-encoded transaction to connected peers. The transaction must -be valid. 
txn may be either JSON, base64, or a file containing either.`, - Run: wrap(walletbroadcastcmd), - } - - walletChangePasswordCmd = &cobra.Command{ - Use: "change-password", - Short: "Change the wallet password", - Long: "Change the encryption password of the wallet, re-encrypting all keys + seeds kept by the wallet.", - Run: wrap(walletchangepasswordcmd), - } - walletCmd = &cobra.Command{ Use: "wallet", Short: "Perform wallet actions", @@ -80,49 +47,6 @@ The smallest unit of Siacoins is the Hasting. One Siacoin is 10^24 Hastings. Oth Run: wrap(walletbalancecmd), } - walletInitCmd = &cobra.Command{ - Use: "init", - Short: "Initialize and encrypt a new wallet", - Long: `Generate a new wallet from a randomly generated seed, and encrypt it. -By default the wallet encryption / unlock password is the same as the generated seed.`, - Run: wrap(walletinitcmd), - } - - walletInitSeedCmd = &cobra.Command{ - Use: "init-seed", - Short: "Initialize and encrypt a new wallet using a pre-existing seed", - Long: `Initialize and encrypt a new wallet using a pre-existing seed.`, - Run: wrap(walletinitseedcmd), - } - - walletLoadCmd = &cobra.Command{ - Use: "load", - Short: "Load a wallet seed", - // Run field is not set, as the load command itself is not a valid command. - // A subcommand must be provided. - } - - walletLoadSeedCmd = &cobra.Command{ - Use: `seed`, - Short: "Add a seed to the wallet", - Long: "Loads an auxiliary seed into the wallet.", - Run: wrap(walletloadseedcmd), - } - - walletLockCmd = &cobra.Command{ - Use: "lock", - Short: "Lock the wallet", - Long: "Lock the wallet, preventing further use", - Run: wrap(walletlockcmd), - } - - walletSeedsCmd = &cobra.Command{ - Use: "seeds", - Short: "View information about your seeds", - Long: "View your primary and auxiliary wallet seeds.", - Run: wrap(walletseedscmd), - } - walletSendCmd = &cobra.Command{ Use: "send", Short: "Send Siacoins to an address", @@ -140,235 +64,40 @@ If no unit is supplied, Hastings will be assumed. 
A dynamic transaction fee is applied depending on the size of the transaction and how busy the network is.`, Run: wrap(walletsendsiacoinscmd), } - - walletSignCmd = &cobra.Command{ - Use: "sign [txn] [tosign]", - Short: "Sign a transaction", - Long: `Sign a transaction. If satd is running with an unlocked wallet, the -/wallet/sign API call will be used. Otherwise, sign will prompt for the wallet -seed, and the signing key(s) will be regenerated. -txn may be either JSON, base64, or a file containing either. -tosign is an optional list of indices. Each index corresponds to a -TransactionSignature in the txn that will be filled in. If no indices are -provided, the wallet will fill in every TransactionSignature it has keys for.`, - Run: walletsigncmd, - } - - walletSweepCmd = &cobra.Command{ - Use: "sweep", - Short: "Sweep Siacoins from a seed.", - Long: `Sweep Siacoins from a seed. The outputs belonging to the seed -will be sent to your wallet.`, - Run: wrap(walletsweepcmd), - } - - walletTransactionsCmd = &cobra.Command{ - Use: "transactions", - Short: "View transactions", - Long: "View transactions related to addresses spendable by the wallet, providing a net flow of Siacoins for each transaction", - Run: wrap(wallettransactionscmd), - } - - walletUnlockCmd = &cobra.Command{ - Use: `unlock`, - Short: "Unlock the wallet", - Long: `Decrypt and load the wallet into memory. -Automatic unlocking is also supported via environment variable: if the -SATD_WALLET_PASSWORD environment variable is set, the unlock command will -use it instead of displaying the typical interactive prompt.`, - Run: wrap(walletunlockcmd), - } ) -const askPasswordText = "We need to encrypt the new data using the current wallet password, please provide: " - -const ( - currentPasswordText = "Current Password: " - newPasswordText = "New Password: " - confirmPasswordText = "Confirm: " -) - -// For an unconfirmed Transaction, the TransactionTimestamp field is set to the -// maximum value of time. 
-var unconfirmedTransactionTimestamp = time.Unix(math.MaxInt64, math.MaxInt64) - -// passwordPrompt securely reads a password from stdin. -func passwordPrompt(prompt string) (pw string, err error) { - fmt.Print(prompt) - if insecureInput { - pw, err = bufio.NewReader(os.Stdin).ReadString('\n') - } else { - var pwBytes []byte - pwBytes, err = term.ReadPassword(int(syscall.Stdin)) - pw = string(pwBytes) - } - fmt.Println() - if err != nil { - err = fmt.Errorf("error reading password: %w", err) - } - return strings.TrimSpace(pw), err -} - -// confirmPassword requests confirmation of a previously-entered password. -func confirmPassword(prev string) error { - pw, err := passwordPrompt(confirmPasswordText) - if err != nil { - return err - } else if pw != prev { - return errors.New("passwords do not match") - } - return nil -} - // walletaddresscmd fetches a new address from the wallet that will be able to // receive coins. func walletaddresscmd() { - addr, err := httpClient.WalletAddressGet() + addr, err := httpClient.WalletAddress() if err != nil { die("Could not generate new address:", err) } - fmt.Printf("Created new address: %s\n", addr.Address) + fmt.Printf("Created new address: %s\n", addr) } // walletaddressescmd fetches the list of addresses that the wallet knows. func walletaddressescmd() { - addrs, err := httpClient.WalletAddressesGet() + addrs, err := httpClient.WalletAddresses() if err != nil { die("Failed to fetch addresses:", err) } - for _, addr := range addrs.Addresses { + for _, addr := range addrs { fmt.Println(addr) } } -// walletchangepasswordcmd changes the password of the wallet. 
-func walletchangepasswordcmd() { - currentPassword, err := passwordPrompt(currentPasswordText) - if err != nil { - die("Reading password failed:", err) - } - newPassword, err := passwordPrompt(newPasswordText) - if err != nil { - die("Reading password failed:", err) - } else if err = confirmPassword(newPassword); err != nil { - die(err) - } - err = httpClient.WalletChangePasswordPost(currentPassword, newPassword) - if err != nil { - die("Changing the password failed:", err) - } - fmt.Println("Password changed successfully.") -} - -// walletinitcmd encrypts the wallet with the given password. -func walletinitcmd() { - var password string - var err error - if initPassword { - password, err = passwordPrompt("Wallet password: ") - if err != nil { - die("Reading password failed:", err) - } else if err = confirmPassword(password); err != nil { - die(err) - } - } - er, err := httpClient.WalletInitPost(password, initForce) - if err != nil { - die("Error when encrypting wallet:", err) - } - fmt.Printf("Recovery seed:\n%s\n\n", er.PrimarySeed) - if initPassword { - fmt.Printf("Wallet encrypted with given password\n") - } else { - fmt.Printf("Wallet encrypted with password:\n%s\n", er.PrimarySeed) - } -} - -// walletinitseedcmd initializes the wallet from a preexisting seed. 
-func walletinitseedcmd() { - seed, err := passwordPrompt("Seed: ") - if err != nil { - die("Reading seed failed:", err) - } - var password string - if initPassword { - password, err = passwordPrompt("Wallet password: ") - if err != nil { - die("Reading password failed:", err) - } else if err = confirmPassword(password); err != nil { - die(err) - } - } - err = httpClient.WalletInitSeedPost(seed, password, initForce) - if err != nil { - die("Could not initialize wallet from seed:", err) - } - if initPassword { - fmt.Println("Wallet initialized and encrypted with given password.") - } else { - fmt.Println("Wallet initialized and encrypted with seed.") - } -} - -// walletloadseedcmd adds a seed to the wallet's list of seeds. -func walletloadseedcmd() { - seed, err := passwordPrompt("New seed: ") - if err != nil { - die("Reading seed failed:", err) - } - password, err := passwordPrompt(askPasswordText) - if err != nil { - die("Reading password failed:", err) - } - err = httpClient.WalletSeedPost(seed, password) - if err != nil { - die("Could not add seed:", err) - } - fmt.Println("Added Key") -} - -// walletlockcmd locks the wallet. -func walletlockcmd() { - err := httpClient.WalletLockPost() - if err != nil { - die("Could not lock wallet:", err) - } -} - -// walletseedcmd returns the current seed. -func walletseedscmd() { - seedInfo, err := httpClient.WalletSeedsGet() - if err != nil { - die("Error retrieving the current seed:", err) - } - fmt.Println("Primary Seed:") - fmt.Println(seedInfo.PrimarySeed) - if len(seedInfo.AllSeeds) == 1 { - // AllSeeds includes the primary seed. - return - } - fmt.Println() - fmt.Println("Auxiliary Seeds:") - for _, seed := range seedInfo.AllSeeds { - if seed == seedInfo.PrimarySeed { - continue - } - fmt.Println() // Extra newline for readability. - fmt.Println(seed) - } -} - // walletsendsiacoinscmd sends Siacoins to a destination address. 
func walletsendsiacoinscmd(amount, dest string) { value, err := types.ParseCurrency(amount) if err != nil { die("Could not parse amount:", err) } - var hash types.Address - if err := hash.UnmarshalText([]byte(dest)); err != nil { + var addr types.Address + if err := addr.UnmarshalText([]byte(dest)); err != nil { die("Failed to parse destination address", err) } - _, err = httpClient.WalletSiacoinsPost(value, hash, walletTxnFeeIncluded) + err = httpClient.WalletSendSiacoins(value, addr) if err != nil { die("Could not send Siacoins:", err) } @@ -377,218 +106,27 @@ func walletsendsiacoinscmd(amount, dest string) { // walletbalancecmd retrieves and displays information about the wallet. func walletbalancecmd() { - status, err := httpClient.WalletGet() - if modules.ContainsError(err, api.ErrAPICallNotRecognized) { - // Assume module is not loaded if status command is not recognized. - fmt.Printf("Wallet:\n Status: %s\n\n", moduleNotReadyStatus) - return - } else if err != nil { - die("Could not get wallet status:", err) - } - - fees, err := httpClient.TransactionPoolFeeGet() + status, err := httpClient.WalletBalance() if err != nil { - die("Could not get fee estimation:", err) - } - encStatus := "Unencrypted" - if status.Encrypted { - encStatus = "Encrypted" - } - if !status.Unlocked { - fmt.Printf(`Wallet status: -%v, Locked -Unlock the wallet to view balance -`, encStatus) - return + die("Could not get wallet status:", err) } - unconfirmedBalance := status.ConfirmedSiacoinBalance.Add(status.UnconfirmedIncomingSiacoins).Sub(status.UnconfirmedOutgoingSiacoins) + unconfirmedBalance := status.Siacoins.Add(status.IncomingSiacoins).Sub(status.OutgoingSiacoins) var delta string - if unconfirmedBalance.Cmp(status.ConfirmedSiacoinBalance) >= 0 { - delta = "+" + unconfirmedBalance.Sub(status.ConfirmedSiacoinBalance).String() + if unconfirmedBalance.Cmp(status.Siacoins) >= 0 { + delta = "+" + unconfirmedBalance.Sub(status.Siacoins).String() } else { - delta = "-" + 
status.ConfirmedSiacoinBalance.Sub(unconfirmedBalance).String() + delta = "-" + status.Siacoins.Sub(unconfirmedBalance).String() } fmt.Printf(`Wallet status: -%s, Unlocked -Height: %v -Confirmed Balance: %v -Unconfirmed Delta: %v -Exact: %v H -Estimated Fee: %v / KB -`, encStatus, status.Height, status.ConfirmedSiacoinBalance, delta, - status.ConfirmedSiacoinBalance.ExactString(), fees.Maximum.Mul64(1e3)) -} - -// walletbroadcastcmd broadcasts a transaction. -func walletbroadcastcmd(txnStr string) { - txn, err := parseTxn(txnStr) - if err != nil { - die("Could not decode transaction:", err) - } - err = httpClient.TransactionPoolRawPost(txn, nil) - if err != nil { - die("Could not broadcast transaction:", err) - } - fmt.Println("Transaction has been broadcast successfully") -} - -// walletsweepcmd sweeps coins and funds from a seed. -func walletsweepcmd() { - seed, err := passwordPrompt("Seed: ") - if err != nil { - die("Reading seed failed:", err) - } - - swept, err := httpClient.WalletSweepPost(seed) - if err != nil { - die("Could not sweep seed:", err) - } - fmt.Printf("Swept %v from seed.\n", swept.Coins) -} - -// walletsigncmd signs a transaction. -func walletsigncmd(cmd *cobra.Command, args []string) { - if len(args) < 1 { - _ = cmd.UsageFunc()(cmd) - os.Exit(exitCodeUsage) - } - - txn, err := parseTxn(args[0]) - if err != nil { - die("Could not decode transaction:", err) - } - - var toSign []types.Hash256 - for _, arg := range args[1:] { - index, err := strconv.ParseUint(arg, 10, 32) - if err != nil { - die("Invalid signature index", index, "(must be an non-negative integer)") - } else if index >= uint64(len(txn.Signatures)) { - die("Invalid signature index", index, "(transaction only has", len(txn.Signatures), "signatures)") - } - toSign = append(toSign, txn.Signatures[index].ParentID) - } - - // Try API first. 
- wspr, err := httpClient.WalletSignPost(txn, toSign) - if err == nil { - txn = wspr.Transaction - } else { - // If satd is running, but the wallet is locked, assume the user - // wanted to sign with satd. - if modules.ContainsError(err, modules.ErrLockedWallet) { - die("Signing via API failed: satd is running, but the wallet is locked.") - } - - // satd is not running; fallback to offline keygen. - walletsigncmdoffline(&txn, toSign) - } - - if walletRawTxn { - var buf bytes.Buffer - e := types.NewEncoder(&buf) - txn.EncodeTo(e) - e.Flush() - _, err = base64.NewEncoder(base64.StdEncoding, os.Stdout).Write(buf.Bytes()) - } else { - err = json.NewEncoder(os.Stdout).Encode(txn) - } - if err != nil { - die("failed to encode txn", err) - } - fmt.Println() -} - -// walletsigncmdoffline is a helper for walletsigncmd that handles signing -// transactions without satd. -func walletsigncmdoffline(txn *types.Transaction, toSign []types.Hash256) { - fmt.Println("Enter your wallet seed to generate the signing key(s) now and sign without satd.") - seedString, err := passwordPrompt("Seed: ") - if err != nil { - die("Reading seed failed:", err) - } - seed, err := modules.DecodeBIP39Phrase(seedString) - if err != nil { - die("Invalid seed:", err) - } - // Signing via seed may take a while, since we need to regenerate - // keys. If it takes longer than a second, print a message to assure - // the user that this is normal. - done := make(chan struct{}) - go func() { - select { - case <-time.After(time.Second): - fmt.Println("Generating keys; this may take a few seconds...") - case <-done: - } - }() - err = wallet.SignTransaction(txn, seed, toSign, 180e3) - if err != nil { - die("Failed to sign transaction:", err) - } - close(done) -} - -// wallettransactionscmd lists all of the transactions related to the wallet, -// providing a net flow of Siacoins for each. 
-func wallettransactionscmd() { - wtg, err := httpClient.WalletTransactionsGet(walletStartHeight, walletEndHeight) - if err != nil { - die("Could not fetch transaction history:", err) - } - cg, err := httpClient.ConsensusGet() - if err != nil { - die("Could not fetch consensus information:", err) - } - fmt.Println(" [timestamp] [height] [transaction id] [net siacoins]") - txns := append(wtg.ConfirmedTransactions, wtg.UnconfirmedTransactions...) - sts, err := wallet.ComputeValuedTransactions(txns, cg.Height) - if err != nil { - die("Could not compute valued transaction: ", err) - } - for _, txn := range sts { - // Convert the Siacoins to a float. - incomingSiacoinsFloat, _ := new(big.Rat).SetFrac(txn.ConfirmedIncomingValue.Big(), types.HastingsPerSiacoin.Big()).Float64() - outgoingSiacoinsFloat, _ := new(big.Rat).SetFrac(txn.ConfirmedOutgoingValue.Big(), types.HastingsPerSiacoin.Big()).Float64() - - // Print the results. - if !txn.ConfirmationTimestamp.Equal(unconfirmedTransactionTimestamp) { - fmt.Println(txn.ConfirmationTimestamp.Format("2006-01-02 15:04:05-0700")) - } else { - fmt.Printf(" unconfirmed") - } - if txn.ConfirmationHeight < 1e9 { - fmt.Printf("%12v", txn.ConfirmationHeight) - } else { - fmt.Printf(" unconfirmed") - } - fmt.Printf("%67v%15.2f SC", txn.TransactionID, incomingSiacoinsFloat - outgoingSiacoinsFloat) - } -} - -// walletunlockcmd unlocks a saved wallet. -func walletunlockcmd() { - // Try reading from environment variable first, then fallback to - // interactive method. Also allow overriding auto-unlock via -p. 
- password := os.Getenv("SATD_WALLET_PASSWORD") - if password != "" && !initPassword { - fmt.Println("Using SATD_WALLET_PASSWORD environment variable") - err := httpClient.WalletUnlockPost(password) - if err != nil { - fmt.Println("Automatic unlock failed!") - } else { - fmt.Println("Wallet unlocked") - return - } - } - password, err := passwordPrompt("Wallet password: ") - if err != nil { - die("Reading password failed:", err) - } - err = httpClient.WalletUnlockPost(password) - if err != nil { - die("Could not unlock wallet:", err) - } +Height: %v +Confirmed SC Balance: %v +Unconfirmed Delta: %v +Exact: %v H +SF Balance: %v +Estimated Fee: %v / KB +`, status.Height, status.Siacoins, delta, + status.Siacoins.ExactString(), status.Siafunds, + status.RecommendedFee.Mul64(1e3)) } diff --git a/satd/daemon.go b/satd/daemon.go index 915f7ea..3c97110 100644 --- a/satd/daemon.go +++ b/satd/daemon.go @@ -3,31 +3,19 @@ package main import ( "fmt" "log" + "net" "os" "os/signal" "time" "github.com/mike76-dev/sia-satellite/internal/build" + "github.com/mike76-dev/sia-satellite/node" "github.com/mike76-dev/sia-satellite/node/api/server" "github.com/mike76-dev/sia-satellite/persist" ) -// tryAutoUnlock will try to automatically unlock the wallet if the -// environment variable is set. -func tryAutoUnlock(srv *server.Server) { - password := os.Getenv("SATD_WALLET_PASSWORD") - if password != "" { - fmt.Println("Wallet Password found, attempting to auto-unlock wallet...") - if err := srv.Unlock(password); err != nil { - fmt.Println("Auto-unlock failed:", err) - } else { - fmt.Println("Auto-unlock successful.") - } - } -} - // startDaemon starts the satd server. 
-func startDaemon(config *persist.SatdConfig, apiPassword string, dbPassword string) error {
+func startDaemon(config *persist.SatdConfig, apiPassword, dbPassword, seed string) error {
 	loadStart := time.Now()
 
 	fmt.Printf("satd v%v\n", build.NodeVersion)
@@ -38,38 +26,24 @@ func startDaemon(config *persist.SatdConfig, apiPassword string, dbPassword stri
 	}
 	fmt.Println("Loading...")
 
-	// Start and run the server.
-	srv, err := server.New(config, apiPassword, dbPassword, loadStart)
+	// Start listening to the API requests.
+	l, err := net.Listen("tcp", config.APIAddr)
 	if err != nil {
-		return err
+		log.Fatal(err)
 	}
-
-	// Attempt to auto-unlock the wallet using the SATD_WALLET_PASSWORD env variable.
-	tryAutoUnlock(srv)
-
-	// Listen for kill signals.
-	sigChan := make(chan os.Signal, 1)
-	signal.Notify(sigChan, os.Interrupt)
-
-	startupTime := time.Since(loadStart)
-	fmt.Printf("Finished full setup in %s\n", startupTime.Truncate(time.Second).String())
-
-	// Wait for Serve to return or for kill signal to be caught.
-	err = func() error {
-		select {
-		case err := <-srv.ServeErr():
-			return err
-		case <-sigChan:
-			fmt.Println("\rCaught stop signal, quitting...")
-			return srv.Close()
-		}
-	}()
+	n, err := node.New(config, dbPassword, seed, loadStart)
 	if err != nil {
-		log.Fatalln(err)
+		log.Fatal(err)
	}
-
-	// Wait for server to complete shutdown.
-	srv.WaitClose()
+	log.Println("p2p: Listening on", n.Syncer.Addr())
+	stop := n.Start()
+	log.Println("api: Listening on", l.Addr())
+	go server.StartWeb(l, n, apiPassword)
+	signalCh := make(chan os.Signal, 1)
+	signal.Notify(signalCh, os.Interrupt)
+	<-signalCh
+	log.Println("Shutting down...")
+	stop()
 	return nil
 }
diff --git a/satd/main.go b/satd/main.go
index 0a56a4c..1f31e8d 100644
--- a/satd/main.go
+++ b/satd/main.go
@@ -7,19 +7,17 @@ import (
 	"os"
 
 	"github.com/mike76-dev/sia-satellite/persist"
 	"golang.org/x/term"
 )
 
// Default config values.
 var defaultConfig = persist.SatdConfig{
 	Name:          "",
-	UserAgent:     "Sat-Agent",
 	GatewayAddr:   ":0",
 	APIAddr:       "localhost:9990",
 	SatelliteAddr: ":9992",
 	Dir:           ".",
-	Bootstrap:     true,
 	DBUser:        "",
 	DBName:        "satellite",
 	PortalPort:    ":8080",
@@ -59,6 +56,22 @@ func getDBPassword() string {
 	return dbPassword
 }
 
+func getWalletSeed() string {
+	seed := os.Getenv("SATD_WALLET_SEED")
+	if seed != "" {
+		log.Println("Using SATD_WALLET_SEED environment variable.")
+	} else {
+		fmt.Print("Enter wallet seed: ")
+		pw, err := term.ReadPassword(int(os.Stdin.Fd()))
+		fmt.Println()
+		if err != nil {
+			log.Fatalf("Could not read wallet seed: %v\n", err)
+		}
+		seed = string(pw)
+	}
+	return seed
+}
+
 func main() {
 	log.SetFlags(0)
 
@@ -77,12 +90,10 @@ func main() {
 
 	// Parse command line flags. If set, they override the loaded config.
 	name := flag.String("name", "", "name of the satellite node")
-	userAgent := flag.String("agent", "", "custom agent used for API calls")
 	gatewayAddr := flag.String("addr", "", "address to listen on for peer connections")
 	apiAddr := flag.String("api-addr", "", "address to serve API on")
 	satelliteAddr := flag.String("sat-addr", "", "address to listen on for renter requests")
 	dir := flag.String("dir", "", "directory to store node state in")
-	bootstrap := flag.Bool("bootstrap", true, "bootstrap the gateway and consensus modules")
 	dbUser := flag.String("db-user", "", "username for accessing the database")
 	dbName := flag.String("db-name", "", "name of MYSQL database")
 	portalPort := flag.String("portal", "", "port number the portal server listens at")
@@ -90,9 +101,6 @@ func main() {
 	if *name != "" {
 		config.Name = *name
 	}
-	if *userAgent != "" {
-		config.UserAgent = *userAgent
-	}
 	if *gatewayAddr != "" {
 		config.GatewayAddr = *gatewayAddr
 	}
@@ -105,7 +113,6 @@ func main() {
 	if *dir != "" {
 		config.Dir = *dir
 	}
-	config.Bootstrap = *bootstrap
 	if *dbUser != "" {
 		config.DBUser = *dbUser
 	}
@@ -128,6 +135,9 @@ func main() {
 	// Fetch DB password.
dbPassword := getDBPassword() + // Fetch wallet seed. + seed := getWalletSeed() + // Create the state directory if it does not yet exist. // This also checks if the provided directory parameter is valid. err = os.MkdirAll(config.Dir, 0700) @@ -136,7 +146,7 @@ func main() { } // Start satd. startDaemon will only return when it is shutting down. - err = startDaemon(&config, apiPassword, dbPassword) + err = startDaemon(&config, apiPassword, dbPassword, seed) if err != nil { log.Fatalln(err) } diff --git a/satdconfig.json b/satdconfig.json index a78eb6f..f23b0de 100644 --- a/satdconfig.json +++ b/satdconfig.json @@ -1,14 +1,12 @@ "Satd Configuration" -"0.3.0" +"0.4.0" { "name": "", - "agent": "Sat-Agent", "gateway": ":0", "api": "localhost:9990", "satellite": ":9992", "mux": ":9993", "dir": "", - "bootstrap": true, "dbUser": "", "dbName": "", "portal": ":8080"