diff --git a/LICENSE b/LICENSE index 261eeb9e..a95f119d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,201 +1,21 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +MIT License + +Copyright (c) 2025 Oleg Baranov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index cedc6587..5b54a6e0 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Based on TON][ton-svg]][ton] [![Telegram Channel][tgc-svg]][tg-channel] -![Coverage](https://img.shields.io/badge/Coverage-69.7%25-yellow) +![Coverage](https://img.shields.io/badge/Coverage-70.5%25-brightgreen) Golang library for interacting with TON blockchain. @@ -12,13 +12,15 @@ This library is native golang implementation of ADNL and lite protocol. It works It is concurrent safe and can be used from multiple goroutines under high workloads. -All main TON protocols are implemented: ADNL, DHT, RLDP, Overlays, HTTP-RLDP, etc. +All main TON protocols are implemented: ADNL, DHT, RLDP, Overlays, etc. 
------ If you love this library and want to support its development you can donate any amount of coins to this ton address ☺️ `EQBx6tZZWa2Tbv6BvgcvegoOQxkRrVaBVwBOoW85nbP37_Go` +You can find many usage examples in **[example](https://github.com/xssnick/tonutils-go/tree/master/example)** directory + ### How to use - [Connection](#Connection) - [Wallet](#Wallet) @@ -56,14 +58,9 @@ If you love this library and want to support its development you can donate any - [Proof creation](#Proofs) - [Network](https://github.com/xssnick/tonutils-go/tree/master/adnl) - [ADNL UDP](https://github.com/xssnick/tonutils-go/blob/master/adnl/adnl_test.go) - - [TON Site request](https://github.com/xssnick/tonutils-go/blob/master/example/site-request/main.go) - - [RLDP-HTTP Client-Server](https://github.com/xssnick/tonutils-go/blob/master/example/http-rldp-highload-test/main.go) - [Custom reconnect policy](#Custom-reconnect-policy) - [Features to implement](#Features-to-implement) - -You can find usage examples in **[example](https://github.com/xssnick/tonutils-go/tree/master/example)** directory - You could also join our **[Telegram channel](https://t.me/tonutilsnews)** and **[group](https://t.me/tonutils)**, feel free ask any questions :) ### Connection @@ -502,7 +499,7 @@ client.SetOnDisconnect(func(addr, serverKey string) { * ✅ Payment channels * ✅ Liteserver proofs automatic validation * DHT Server -* TVM +* TVM (Contract execution emulation) [ton-svg]: https://img.shields.io/badge/Based%20on-TON-blue diff --git a/address/addr.go b/address/addr.go index 557d30e4..5f5c6bc0 100644 --- a/address/addr.go +++ b/address/addr.go @@ -7,10 +7,9 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/xssnick/tonutils-go/crc16" "strconv" "strings" - - "github.com/sigurn/crc16" ) type AddrType int @@ -85,8 +84,6 @@ func (a *Address) BitsLen() uint { return a.bitsLen } -var crcTable = crc16.MakeTable(crc16.CRC16_XMODEM) - func (a *Address) StringRaw() string { switch a.addrType { case 
NoneAddress: @@ -105,7 +102,7 @@ func (a *Address) String() string { case StdAddress: var address [36]byte copy(address[0:34], a.prepareChecksumData()) - binary.BigEndian.PutUint16(address[34:], crc16.Checksum(address[:34], crcTable)) + binary.BigEndian.PutUint16(address[34:], crc16.ChecksumXMODEM(address[:34])) return base64.RawURLEncoding.EncodeToString(address[:]) case ExtAddress: address := make([]byte, 1+4+len(a.data)) @@ -232,11 +229,11 @@ func ParseAddr(addr string) (*Address, error) { } if len(data) != 36 { - return nil, errors.New("incorrect address data") + return nil, errors.New("incorrect address data " + addr) } checksum := data[len(data)-2:] - if crc16.Checksum(data[:len(data)-2], crc16.MakeTable(crc16.CRC16_XMODEM)) != binary.BigEndian.Uint16(checksum) { + if crc16.ChecksumXMODEM(data[:len(data)-2]) != binary.BigEndian.Uint16(checksum) { return nil, errors.New("invalid address") } @@ -267,7 +264,7 @@ func ParseRawAddr(addr string) (*Address, error) { } func (a *Address) Checksum() uint16 { - return crc16.Checksum(a.prepareChecksumData(), crc16.MakeTable(crc16.CRC16_XMODEM)) + return crc16.ChecksumXMODEM(a.prepareChecksumData()) } func (a *Address) prepareChecksumData() []byte { diff --git a/address/bit.go b/address/bit.go index fdc06b0e..ef737894 100644 --- a/address/bit.go +++ b/address/bit.go @@ -1,17 +1,24 @@ package address -// TODO add length checks and panic on errors - func setBit(n *byte, pos uint) { + if pos > 7 { + panic("bit position out of range [0..7]") + } *n |= 1 << pos } func clearBit(n *byte, pos uint) { + if pos > 7 { + panic("bit position out of range [0..7]") + } mask := ^(1 << pos) *n &= byte(mask) } func hasBit(n byte, pos uint) bool { + if pos > 7 { + panic("bit position out of range [0..7]") + } val := n & (1 << pos) return val > 0 } diff --git a/address/bit_test.go b/address/bit_test.go index de8f26d0..4dd64074 100644 --- a/address/bit_test.go +++ b/address/bit_test.go @@ -7,15 +7,26 @@ func TestClearBit(t *testing.T) { n 
*byte pos uint } + bytePtr := func(v byte) *byte { b := v; return &b } tests := []struct { name string args args }{ - // TODO: Add test cases. + {"0", args{bytePtr(0b00000001), 0}}, + {"1", args{bytePtr(0b00000010), 1}}, + {"2", args{bytePtr(0b00000100), 2}}, + {"3", args{bytePtr(0b00001000), 3}}, + {"4", args{bytePtr(0b00010000), 4}}, + {"5", args{bytePtr(0b00100000), 5}}, + {"6", args{bytePtr(0b01000000), 6}}, + {"7", args{bytePtr(0b10000000), 7}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { clearBit(tt.args.n, tt.args.pos) + if *tt.args.n != 0 { + t.Errorf("ClearBit() = %v, n = %v", tt.name, *tt.args.n) + } }) } } @@ -54,15 +65,26 @@ func TestSetBit(t *testing.T) { n *byte pos uint } + bytePtr := func(v byte) *byte { b := v; return &b } tests := []struct { name string args args }{ - // TODO: Add test cases. + {"0", args{bytePtr(0b00000000), 0}}, + {"1", args{bytePtr(0b00000000), 1}}, + {"2", args{bytePtr(0b00000000), 2}}, + {"3", args{bytePtr(0b00000000), 3}}, + {"4", args{bytePtr(0b00000000), 4}}, + {"5", args{bytePtr(0b00000000), 5}}, + {"6", args{bytePtr(0b00000000), 6}}, + {"7", args{bytePtr(0b00000000), 7}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { setBit(tt.args.n, tt.args.pos) + if *tt.args.n == 0 { + t.Errorf("SetBit() = %v, n = %v", tt.name, *tt.args.n) + } }) } } diff --git a/adnl/address/address.go b/adnl/address/address.go index 69190f90..4404f7bb 100644 --- a/adnl/address/address.go +++ b/adnl/address/address.go @@ -7,6 +7,7 @@ import ( func init() { tl.Register(UDP{}, "adnl.address.udp ip:int port:int = adnl.Address") + tl.Register(UDP6{}, "adnl.address.udp6 ip:int128 port:int = adnl.Address") tl.Register(List{}, "adnl.addressList addrs:(vector adnl.Address) version:int reinit_date:int priority:int expire_at:int = adnl.AddressList") } @@ -15,8 +16,13 @@ type UDP struct { Port int32 `tl:"int"` } +type UDP6 struct { + IP net.IP `tl:"int128"` + Port int32 `tl:"int"` +} + type List struct { - Addresses 
[]*UDP `tl:"vector struct boxed"` + Addresses []*UDP `tl:"vector struct boxed"` // TODO: v6 too Version int32 `tl:"int"` ReinitDate int32 `tl:"int"` Priority int32 `tl:"int"` diff --git a/adnl/adnl.go b/adnl/adnl.go index ad7d658d..80287964 100644 --- a/adnl/adnl.go +++ b/adnl/adnl.go @@ -71,6 +71,8 @@ type ADNL struct { recvPriorityAddrVer int32 ourAddrVerOnPeerSide int32 + peerID []byte + sharedKey []byte peerKey ed25519.PublicKey ourAddresses unsafe.Pointer @@ -175,6 +177,16 @@ func (a *ADNL) processPacket(packet *PacketContent, fromChannel bool) (err error if !fromChannel && packet.From != nil { a.mx.Lock() if a.peerKey == nil { + a.sharedKey, err = keys.SharedKey(a.ourKey, packet.From.Key) + if err != nil { + return err + } + + a.peerID, err = tl.Hash(keys.PublicKeyED25519{Key: packet.From.Key}) + if err != nil { + return err + } + a.peerKey = packet.From.Key } a.mx.Unlock() @@ -689,7 +701,7 @@ func (a *ADNL) send(buf []byte) error { // not close on io timeout because it can be triggered by network overload if !strings.Contains(err.Error(), "i/o timeout") { // it should trigger disconnect handler in read routine - a.writer.Close() + a.Close() } return err } else if n != len(buf) { @@ -735,12 +747,11 @@ func (a *ADNL) GetAddressList() address.List { } func (a *ADNL) GetID() []byte { - id, _ := tl.Hash(keys.PublicKeyED25519{Key: a.peerKey}) - return id + return append([]byte{}, a.peerID...) } func (a *ADNL) GetPubKey() ed25519.PublicKey { - return a.peerKey + return append(ed25519.PublicKey{}, a.peerKey...) 
} func (a *ADNL) Reinit() { @@ -825,23 +836,14 @@ func (a *ADNL) createPacket(seqno int64, isResp bool, msgs ...any) ([]byte, erro hash := sha256.Sum256(packetData) checksum := hash[:] - key, err := keys.SharedKey(a.ourKey, a.peerKey) - if err != nil { - return nil, err - } - - ctr, err := keys.BuildSharedCipher(key, checksum) + ctr, err := keys.BuildSharedCipher(a.sharedKey, checksum) if err != nil { return nil, err } ctr.XORKeyStream(packetData, packetData) - enc, err := tl.Hash(keys.PublicKeyED25519{Key: a.peerKey}) - if err != nil { - return nil, err - } - copy(bufData, enc) + copy(bufData, a.peerID) copy(bufData[32:], a.ourKey.Public().(ed25519.PublicKey)) copy(bufData[64:], checksum) diff --git a/adnl/conn.go b/adnl/conn.go index 51bca33a..ceeb97da 100644 --- a/adnl/conn.go +++ b/adnl/conn.go @@ -25,10 +25,12 @@ func newWriter(writer func(p []byte, deadline time.Time) (err error), close func } } +var ErrPeerConnClosed = errors.New("peer connection was closed") + func (c *clientConn) Write(b []byte, deadline time.Time) (n int, err error) { select { case <-c.closer: - return 0, fmt.Errorf("connection was closed") + return 0, ErrPeerConnClosed default: } diff --git a/adnl/dht/client.go b/adnl/dht/client.go index 449c5a64..88866e74 100644 --- a/adnl/dht/client.go +++ b/adnl/dht/client.go @@ -450,6 +450,7 @@ func (c *Client) FindValue(ctx context.Context, key *Key, continuation ...*Conti cond := sync.NewCond(&sync.Mutex{}) waitingThreads := 0 + stopped := false launchWorker := func() { for { @@ -465,12 +466,18 @@ func (c *Client) FindValue(ctx context.Context, key *Key, continuation ...*Conti for node == nil { waitingThreads++ if waitingThreads == threads { + stopped = true + cond.Broadcast() cond.L.Unlock() result <- nil return } cond.Wait() + if stopped { + cond.L.Unlock() + return + } node, _ = plist.Get() waitingThreads-- } @@ -485,20 +492,27 @@ func (c *Client) FindValue(ctx context.Context, key *Key, continuation ...*Conti switch v := val.(type) { case 
*Value: + cond.L.Lock() + if !stopped { + stopped = true + cond.Broadcast() + } + cond.L.Unlock() result <- &foundResult{value: v, node: node} return case []*Node: added := false + cond.L.Lock() for _, n := range v { if newNode, err := c.addNode(n); err == nil { plist.Add(newNode) added = true } } - if added { cond.Broadcast() } + cond.L.Unlock() } } } @@ -509,8 +523,20 @@ func (c *Client) FindValue(ctx context.Context, key *Key, continuation ...*Conti select { case <-ctx.Done(): + cond.L.Lock() + if !stopped { + stopped = true + cond.Broadcast() + } + cond.L.Unlock() return nil, nil, ctx.Err() case val := <-result: + cond.L.Lock() + if !stopped { + stopped = true + cond.Broadcast() + } + cond.L.Unlock() if val == nil { return nil, cont, ErrDHTValueIsNotFound } diff --git a/adnl/gateway.go b/adnl/gateway.go index 44758e9d..fe72435d 100644 --- a/adnl/gateway.go +++ b/adnl/gateway.go @@ -125,8 +125,7 @@ func NewGatewayWithNetManager(key ed25519.PrivateKey, reader NetManager) *Gatewa var PacketsBufferSize = 128 * 1024 var DefaultListener = func(addr string) (net.PacketConn, error) { - // since ip field in adnl accept only 4 bytes, we cannot fully support v6 right now - lp, err := net.ListenPacket("udp4", addr) + lp, err := net.ListenPacket("udp", addr) if err != nil { return nil, err } @@ -432,8 +431,23 @@ func (g *Gateway) registerClient(addr net.Addr, key ed25519.PublicKey, id string addrList.Version = addrList.ReinitDate a := g.initADNL() - a.SetAddresses(addrList) + + sharedKey, err := keys.SharedKey(a.ourKey, key) + if err != nil { + return nil, err + } + + peerId, err := tl.Hash(keys.PublicKeyED25519{Key: key}) + if err != nil { + return nil, err + } + + a.peerID = peerId + a.sharedKey = sharedKey a.peerKey = key + + a.SetAddresses(addrList) + a.addr = addr.String() a.writer = newWriter(func(p []byte, deadline time.Time) (err error) { currentAddr := *(*net.Addr)(atomic.LoadPointer(&peer.addr)) @@ -565,7 +579,9 @@ func (p *peerConn) SetDisconnectHandler(handler 
func(addr string, key ed25519.Pu p.server.mx.Unlock() if handler != nil { - handler(addr, key) + // run it async to avoid potential deadlock issues in user code + // in case closed under lock, and the same lock is used in handler + go handler(addr, key) } }) } diff --git a/adnl/keys/crypto_test.go b/adnl/keys/crypto_test.go index bc6fc513..fa95e28a 100644 --- a/adnl/keys/crypto_test.go +++ b/adnl/keys/crypto_test.go @@ -1,6 +1,9 @@ package keys import ( + "bytes" + "crypto/aes" + "crypto/cipher" "crypto/ed25519" "reflect" "testing" @@ -42,6 +45,19 @@ func Test_sharedKey(t *testing.T) { }, wantErr: false, }, + { + name: "invalid server key", + args: args{ + ourKey: ed25519.NewKeyFromSeed([]byte{ + 175, 46, 138, 194, 124, 100, 226, + 85, 88, 44, 196, 159, 130, 167, + 223, 23, 125, 231, 145, 177, 104, + 171, 189, 252, 16, 143, 108, 237, + 99, 32, 104, 10}), + serverKey: []byte{1, 2, 3}, + }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -56,3 +72,136 @@ func Test_sharedKey(t *testing.T) { }) } } + +func TestBuildSharedCipher(t *testing.T) { + key := []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + } + checksum := []byte{ + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + } + + stream, err := BuildSharedCipher(key, checksum) + if err != nil { + t.Fatalf("BuildSharedCipher() error = %v", err) + } + + plaintext := []byte{ + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, + } + got := make([]byte, len(plaintext)) + stream.XORKeyStream(got, plaintext) + + expectedKey := 
make([]byte, 32) + copy(expectedKey, key[:16]) + copy(expectedKey[16:], checksum[16:]) + expectedIV := make([]byte, 16) + copy(expectedIV, checksum[:4]) + copy(expectedIV[4:], key[20:]) + block, err := aes.NewCipher(expectedKey) + if err != nil { + t.Fatalf("failed to init AES cipher: %v", err) + } + expectedStream := cipher.NewCTR(block, expectedIV) + want := make([]byte, len(plaintext)) + expectedStream.XORKeyStream(want, plaintext) + + if !bytes.Equal(got, want) { + t.Errorf("BuildSharedCipher() produced %x, want %x", got, want) + } +} + +func TestNewCipherCtr(t *testing.T) { + key := []byte{ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + } + iv := []byte{ + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + } + stream, err := NewCipherCtr(key, iv) + if err != nil { + t.Fatalf("NewCipherCtr() error = %v", err) + } + + plaintext := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef} + got := make([]byte, len(plaintext)) + stream.XORKeyStream(got, plaintext) + + block, err := aes.NewCipher(key) + if err != nil { + t.Fatalf("failed to init AES cipher: %v", err) + } + expectedStream := cipher.NewCTR(block, iv) + want := make([]byte, len(plaintext)) + expectedStream.XORKeyStream(want, plaintext) + + if !bytes.Equal(got, want) { + t.Errorf("NewCipherCtr() produced %x, want %x", got, want) + } + + if _, err := NewCipherCtr(key[:15], iv); err == nil { + t.Fatal("NewCipherCtr() expected error for invalid key length") + } +} + +func TestEd25519PrivateToX25519(t *testing.T) { + seed := []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + } + priv := ed25519.NewKeyFromSeed(seed) + got := Ed25519PrivateToX25519(priv) + want := []byte{ + 0x38, 0x94, 0xee, 0xa4, 0x9c, 0x58, 0x0a, 0xef, + 0x81, 0x69, 0x35, 0x76, 0x2b, 
0xe0, 0x49, 0x55, + 0x9d, 0x6d, 0x14, 0x40, 0xde, 0xde, 0x12, 0xe6, + 0xa1, 0x25, 0xf1, 0x84, 0x1f, 0xff, 0x8e, 0x6f, + } + + if !bytes.Equal(got, want) { + t.Errorf("Ed25519PrivateToX25519() = %x, want %x", got, want) + } +} + +func TestEd25519PubToX25519(t *testing.T) { + seed := []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + } + priv := ed25519.NewKeyFromSeed(seed) + got, err := Ed25519PubToX25519(priv.Public().(ed25519.PublicKey)) + if err != nil { + t.Fatalf("Ed25519PubToX25519() error = %v", err) + } + want := []byte{ + 0x47, 0x01, 0xd0, 0x84, 0x88, 0x45, 0x1f, 0x54, + 0x5a, 0x40, 0x9f, 0xb5, 0x8a, 0xe3, 0xe5, 0x85, + 0x81, 0xca, 0x40, 0xac, 0x3f, 0x7f, 0x11, 0x46, + 0x98, 0xcd, 0x71, 0xde, 0xac, 0x73, 0xca, 0x01, + } + + if !bytes.Equal(got, want) { + t.Errorf("Ed25519PubToX25519() = %x, want %x", got, want) + } +} + +func TestEd25519PubToX25519_InvalidKey(t *testing.T) { + if _, err := Ed25519PubToX25519(ed25519.PublicKey(make([]byte, 31))); err == nil { + t.Fatal("Ed25519PubToX25519() expected error for invalid key length") + } +} diff --git a/adnl/node/block.go b/adnl/node/block.go index 8937705d..6e819c58 100644 --- a/adnl/node/block.go +++ b/adnl/node/block.go @@ -1,9 +1,11 @@ package node -import "github.com/xssnick/tonutils-go/tl" +import ( + "github.com/xssnick/tonutils-go/tl" + "github.com/xssnick/tonutils-go/ton" +) func init() { - tl.Register(BlockIDExt{}, "tonNode.blockIdExt workchain:int shard:long seqno:int root_hash:int256 file_hash:int256 = tonNode.BlockIdExt") tl.Register(DownloadBlock{}, "tonNode.downloadBlock block:tonNode.blockIdExt = tonNode.Data") tl.Register(DownloadBlockFull{}, "tonNode.downloadBlockFull block:tonNode.blockIdExt = tonNode.DataFull") tl.Register(DataFull{}, "tonNode.dataFull id:tonNode.blockIdExt proof:bytes block:bytes is_link:Bool = tonNode.DataFull") @@ 
-15,34 +17,26 @@ func init() { } type DownloadBlock struct { - Block BlockIDExt `tl:"struct"` + Block ton.BlockIDExt `tl:"struct"` } type DownloadBlockFull struct { - Block BlockIDExt `tl:"struct"` -} - -type BlockIDExt struct { - Workchain int32 `tl:"int"` - Shard int64 `tl:"long"` - Seqno int32 `tl:"int"` - RootHash []byte `tl:"int256"` - FileHash []byte `tl:"int256"` + Block ton.BlockIDExt `tl:"struct"` } type DataFull struct { - ID BlockIDExt `tl:"struct"` - Proof []byte `tl:"bytes"` - Block []byte `tl:"bytes"` - IsLink bool `tl:"bool"` + ID ton.BlockIDExt `tl:"struct"` + Proof []byte `tl:"bytes"` + Block []byte `tl:"bytes"` + IsLink bool `tl:"bool"` } type DataFullEmpty struct{} type NewShardBlock struct { - ID BlockIDExt `tl:"struct"` - CCSeqno int32 `tl:"int"` - Data []byte `tl:"bytes"` + ID ton.BlockIDExt `tl:"struct"` + CCSeqno int32 `tl:"int"` + Data []byte `tl:"bytes"` } type NewShardBlockBroadcast struct { @@ -55,7 +49,7 @@ type BlockSignature struct { } type BlockBroadcast struct { - ID BlockIDExt `tl:"struct"` + ID ton.BlockIDExt `tl:"struct"` CatchainSeqno int32 `tl:"int"` ValidatorSetHash int32 `tl:"int"` Signatures []BlockSignature `tl:"vector struct"` diff --git a/adnl/overlay/manager-adnl.go b/adnl/overlay/manager-adnl.go index 74308239..d92b6452 100644 --- a/adnl/overlay/manager-adnl.go +++ b/adnl/overlay/manager-adnl.go @@ -7,13 +7,9 @@ import ( "fmt" "github.com/xssnick/tonutils-go/adnl" "github.com/xssnick/tonutils-go/tl" - "reflect" "sync" - "time" ) -const _PacketWaitTime = 15 * time.Millisecond - const _BroadcastFlagAnySender = 1 const _CertFlagAllowFEC = 1 @@ -140,11 +136,8 @@ func (a *ADNLWrapper) customHandler(msg *adnl.MessageCustom) error { switch t := obj.(type) { case Broadcast: - var gh any - _, _ = tl.Parse(&gh, t.Data, true) - println("BROADCAST", reflect.TypeOf(gh).String()) case BroadcastFECShort: - println("BROADCAST SHORT", t.Seqno, t.BroadcastHash) + // TODO: case BroadcastFEC: if err := o.processFECBroadcast(&t); err != nil 
{ return fmt.Errorf("failed to process FEC broadcast: %w", err) diff --git a/adnl/overlay/manager-rldp.go b/adnl/overlay/manager-rldp.go index 8612bd61..eaee404f 100644 --- a/adnl/overlay/manager-rldp.go +++ b/adnl/overlay/manager-rldp.go @@ -12,6 +12,7 @@ import ( type RLDP interface { GetADNL() rldp.ADNL + GetRateInfo() (left int64, total int64) Close() DoQuery(ctx context.Context, maxAnswerSize uint64, query, result tl.Serializable) error DoQueryAsync(ctx context.Context, maxAnswerSize uint64, id []byte, query tl.Serializable, result chan<- rldp.AsyncQueryResult) error diff --git a/adnl/overlay/overlay-adnl.go b/adnl/overlay/overlay-adnl.go index 85575423..7b42a349 100644 --- a/adnl/overlay/overlay-adnl.go +++ b/adnl/overlay/overlay-adnl.go @@ -12,6 +12,7 @@ import ( "github.com/xssnick/tonutils-go/adnl/keys" "github.com/xssnick/tonutils-go/adnl/rldp" "github.com/xssnick/tonutils-go/tl" + "maps" "reflect" "sync" "time" @@ -88,9 +89,7 @@ func (a *ADNLOverlayWrapper) SetAuthorizedKeys(keysWithMaxLen map[string]uint32) // reset and copy a.authorizedKeys = map[string]uint32{} - for k, v := range keysWithMaxLen { - a.authorizedKeys[k] = v - } + maps.Copy(a.authorizedKeys, keysWithMaxLen) } func (a *ADNLWrapper) UnregisterOverlay(id []byte) { @@ -296,12 +295,10 @@ func (a *ADNLOverlayWrapper) processFECBroadcast(t *BroadcastFEC) error { defer stream.mx.Unlock() if stream.finishedAt != nil { - var received tl.Serializable = FECCompleted{ - Hash: broadcastHash, - } - // got packet for a finished stream, let them know that it is received - err := a.ADNL.SendCustomMessage(context.Background(), received) + err := a.ADNL.SendCustomMessage(context.Background(), FECCompleted{ + Hash: broadcastHash, + }) if err != nil { return fmt.Errorf("failed to send overlay fec received message: %w", err) } @@ -312,7 +309,7 @@ func (a *ADNLOverlayWrapper) processFECBroadcast(t *BroadcastFEC) error { tm := time.Now() stream.lastMessageAt = tm - canTryDecode, err := 
stream.decoder.AddSymbol(uint32(t.Seqno), t.Data) + canTryDecode, err := stream.decoder.AddSymbol(t.Seqno, t.Data) if err != nil { return fmt.Errorf("failed to add raptorq symbol %d: %w", t.Seqno, err) } @@ -351,19 +348,13 @@ func (a *ADNLOverlayWrapper) processFECBroadcast(t *BroadcastFEC) error { return fmt.Errorf("failed to parse decoded broadcast message: %w", err) } - var complete tl.Serializable = FECCompleted{ + _ = a.ADNL.SendCustomMessage(context.Background(), FECCompleted{ Hash: broadcastHash, - } - - err = a.ADNL.SendCustomMessage(context.Background(), complete) - if err != nil { - return fmt.Errorf("failed to send rldp complete message: %w", err) - } + }) if bHandler := a.broadcastHandler; bHandler != nil { // handle result - err = bHandler(res, stream.trusted) - if err != nil { + if err = bHandler(res, stream.trusted); err != nil { return fmt.Errorf("failed to process broadcast message: %w", err) } } diff --git a/adnl/packet.go b/adnl/packet.go index 64b2a45b..ed93d643 100644 --- a/adnl/packet.go +++ b/adnl/packet.go @@ -184,7 +184,7 @@ func (p *PacketContent) Serialize(buf *bytes.Buffer) (int, error) { binary.LittleEndian.PutUint32(tmp, _PacketContentID) buf.Write(tmp) - tl.ToBytesToBuffer(buf, p.Rand1) + _ = tl.ToBytesToBuffer(buf, p.Rand1) var flags uint32 if p.Seqno != nil { @@ -314,10 +314,10 @@ func (p *PacketContent) Serialize(buf *bytes.Buffer) (int, error) { } if p.Signature != nil { - tl.ToBytesToBuffer(buf, p.Signature) + _ = tl.ToBytesToBuffer(buf, p.Signature) } - tl.ToBytesToBuffer(buf, p.Rand2) + _ = tl.ToBytesToBuffer(buf, p.Rand2) return payloadLen, nil } diff --git a/adnl/rldp/bbr2.go b/adnl/rldp/bbr2.go new file mode 100644 index 00000000..b780d235 --- /dev/null +++ b/adnl/rldp/bbr2.go @@ -0,0 +1,571 @@ +package rldp + +import ( + "fmt" + "math" + "sync/atomic" + "time" +) + +var BBRLogger func(a ...any) = nil + +type SendClock struct { + mask uint32 + startedAt int64 + slots []atomic.Uint64 // packed: [seqno:32][t_ms:32] +} + +func 
NewSendClock(capPow2 int) *SendClock { + if capPow2&(capPow2-1) != 0 { + panic("cap must be power of two") + } + + s := &SendClock{ + startedAt: time.Now().UnixMilli(), + mask: uint32(capPow2 - 1), + slots: make([]atomic.Uint64, capPow2), + } + return s +} + +func pack(seq, ms uint32) uint64 { return (uint64(seq) << 32) | uint64(ms) } +func unpack(v uint64) (seq, ms uint32) { return uint32(v >> 32), uint32(v) } + +func (s *SendClock) OnSend(seq uint32, nowMs int64) { + idx := seq & s.mask + s.slots[idx].Store(pack(seq, uint32(nowMs-s.startedAt))) +} + +func (s *SendClock) SentAt(seq uint32) (ms int64, ok bool) { + idx := seq & s.mask + v := s.slots[idx].Load() + if hi, lo := unpack(v); hi == seq { + return int64(lo) + s.startedAt, true + } + return 0, false +} + +type BBRv2Options struct { + // Time window for bottleneck bandwidth estimation (seconds) + BtlBwWindowSec int + + // Minimum duration of a gain cycle in ProbeBW (ms) + ProbeBwCycleMs int64 + + // Duration of ProbeRTT phase (ms) + ProbeRTTDurationMs int64 + + // MinRTT staleness timeout (ms): enter ProbeRTT if minRTT hasn't been refreshed longer than this + MinRTTExpiryMs int64 + + // Lower and upper bounds for pacing (bytes/sec) + MinRate int64 + MaxRate int64 // 0 = no cap + + // Threshold for "high loss" (fraction) + HighLoss float64 // e.g., 0.02..0.1 + + // Beta factor to shrink inflight_hi when losses are high + Beta float64 // e.g., 0.85 + + // Initial "guessed" RTT if ObserveRTT is unavailable + DefaultRTTMs int64 + + // Minimum ACK window duration (ms) to avoid updating too frequently + MinSampleMs int64 + + Name string +} + +type BBRv2Controller struct { + limiter *TokenBucket + opts BBRv2Options + + // Accumulators for input deltas + _total atomic.Int64 + _recv atomic.Int64 + _samples atomic.Int64 + lastProc atomic.Int64 // unix ms of the last update + + // BBR state + state atomic.Int32 // 0=startup, 1=drain, 2=probebw, 3=probertt + cycleStamp atomic.Int64 // start time of the current gain 
cycle + cycleIndex atomic.Int32 // index within the gain table + fullBW atomic.Int64 // "full bandwidth" detection + fullBWCount atomic.Int32 + + // Filters and estimates + btlbw atomic.Int64 // bytes/sec (max filter) + minRTT atomic.Int64 // ms + lastRTT atomic.Int64 // ms + minRTTAt atomic.Int64 // unix ms when minRTT was last updated + minRTTProvisional atomic.Bool + inflight atomic.Int64 // target inflight (bytes), roughly BtlBw * minRTT + hiInflight atomic.Int64 + loInflight atomic.Int64 + + // Loss accounting for the current window + lossTotal atomic.Int64 + lossLost atomic.Int64 + lastAckTs atomic.Int64 // unix ms marking the start of the ACK window + + // Current pacing rate (bytes/sec) + pacingRate atomic.Int64 + + appLimited atomic.Bool + + dbgLast atomic.Int64 + + lastBtlBwDecay atomic.Int64 + + lastLossRate atomic.Uint64 + lastSampleTot atomic.Int64 + lastSampleLos atomic.Int64 +} + +func NewBBRv2Controller(l *TokenBucket, o BBRv2Options) *BBRv2Controller { + applyBBRDefaults(&o) + now := nowMs() + c := &BBRv2Controller{ + limiter: l, + opts: o, + } + c.state.Store(0) + c.cycleStamp.Store(now) + c.lastProc.Store(now) + c.lastAckTs.Store(now) + c.lastBtlBwDecay.Store(now) + + if o.MinRate > 0 { + c.pacingRate.Store(o.MinRate) + l.SetRate(o.MinRate) + } + + if o.DefaultRTTMs > 0 { + c.minRTT.Store(o.DefaultRTTMs) + c.minRTTAt.Store(now) + } else { + c.minRTT.Store(25) + c.minRTTAt.Store(now) + } + c.lastRTT.Store(c.minRTT.Load()) + c.minRTTProvisional.Store(true) + + start := l.GetRate() + if start <= 0 { + start = max64(o.MinRate, 1024*64) + } + c.btlbw.Store(start) + c.pacingRate.Store(start) + c.inflight.Store(rateToInflight(start, c.minRTT.Load())) + c.hiInflight.Store(c.inflight.Load()) + c.loInflight.Store(0) + + return c +} + +func applyBBRDefaults(o *BBRv2Options) { + if o.BtlBwWindowSec == 0 { + o.BtlBwWindowSec = 10 + } + if o.ProbeBwCycleMs == 0 { + o.ProbeBwCycleMs = 200 + } + if o.ProbeRTTDurationMs == 0 { + o.ProbeRTTDurationMs = 150 + } + 
if o.MinRTTExpiryMs == 0 { + o.MinRTTExpiryMs = 10_000 // 10s + } + if o.MinRate == 0 { + o.MinRate = 32 * 1024 // 32 KiB/s + } + if o.HighLoss == 0 { + o.HighLoss = 0.05 // 5% + } + if o.Beta == 0 { + o.Beta = 0.85 + } + if o.DefaultRTTMs == 0 { + o.DefaultRTTMs = 25 + } + if o.MinSampleMs == 0 { + o.MinSampleMs = 25 + } +} + +func (c *BBRv2Controller) SetAppLimited(v bool) { c.appLimited.Store(v) } + +func (c *BBRv2Controller) ObserveDelta(total, recv int64) { + if total == 0 { + return + } + + c._total.Add(total) + c._recv.Add(recv) + c._samples.Add(1) + c.maybeUpdate() +} + +func (c *BBRv2Controller) ObserveRTT(rttMs int64) { + now := nowMs() + old := c.minRTT.Load() + provisional := c.minRTTProvisional.Load() + + if old == 0 || provisional || rttMs < old { + c.minRTT.Store(rttMs) + c.minRTTAt.Store(now) + c.minRTTProvisional.Store(false) + } else if rttMs <= old+max64(1, old/8) { // <= 12.5% from min + c.minRTTAt.Store(now) + } + c.lastRTT.Store(rttMs) + + if btl := c.btlbw.Load(); btl > 0 { + c.inflight.Store(rateToInflight(btl, c.minRTT.Load())) + } +} + +func (c *BBRv2Controller) maybeUpdate() { + now := nowMs() + + last := c.lastProc.Load() + if last+c.opts.MinSampleMs > now { + return + } + if !c.lastProc.CompareAndSwap(last, now) { + return + } + + prevAckTs := c.lastAckTs.Swap(now) + elapsedMs := now - prevAckTs + if elapsedMs < max64(10, c.opts.MinSampleMs/2) { + return + } + + total := c._total.Swap(0) + acked := c._recv.Swap(0) + c._samples.Store(0) + if total <= 0 { + return + } + + lost := total - acked + if lost < 0 { + lost = 0 + } + + c.lastSampleTot.Store(total) + c.lastSampleLos.Store(lost) + + const minAckBytesForLoss = 2 * 1500 // min ~2 MSS confirmed + if total >= minAckBytesForLoss { + c.lossTotal.Add(total) + c.lossLost.Add(lost) + } + + if acked > 0 { + ackRate := int64(float64(acked) * 1000.0 / float64(elapsedMs)) // B/s + c.updateBtlBw(ackRate, now) + } + + c.checkProbeRTT(now, acked) + lossRate := c.updateModelAndRate(now) + + if 
BBRLogger != nil && now-c.dbgLast.Load() >= 1000 { + c.dbgLast.Store(now) + + var ackRateBps int64 + if elapsedMs > 0 { + ackRateBps = int64(float64(acked) * 1000.0 / float64(elapsedMs)) + } + lossPct := fmt.Sprintf("%.2f%%", lossRate*100.0) + + BBRLogger("[BBR] ", + c.opts.Name, " win elapsed=", elapsedMs, "ms acked="+humanBytes(acked)+" total="+humanBytes(total)+" loss=", lossPct, + "state=", c.state.Load(), "appLimited=", c.appLimited.Load(), "ackRate="+humanBps(ackRateBps)+" pacing="+humanBps(c.pacingRate.Load())+ + " btlbw="+humanBps(c.btlbw.Load())+" minRTT=", c.minRTT.Load(), "ms", + ) + } +} + +func humanBps(bps int64) string { + if bps <= 0 { + return "0 B/s (0 Mbit/s)" + } + miBps := float64(bps) / (1024.0 * 1024.0) // MiB/s + mbps := float64(bps*8) / 1e6 + return fmt.Sprintf("%.2f MB/s (%.2f Mbit/s)", miBps, mbps) +} + +func humanBytes(n int64) string { + const ( + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + ) + switch { + case n >= GiB: + return fmt.Sprintf("%.2f GB", float64(n)/float64(GiB)) + case n >= MiB: + return fmt.Sprintf("%.2f MB", float64(n)/float64(MiB)) + case n >= KiB: + return fmt.Sprintf("%.2f KB", float64(n)/float64(KiB)) + default: + return fmt.Sprintf("%d B", n) + } +} + +func (c *BBRv2Controller) updateBtlBw(sample int64, now int64) { + if sample <= 0 { + return + } + + if !c.appLimited.Load() { + cur := c.btlbw.Load() + if sample > cur { + c.btlbw.Store(sample) + } + + // full bandwidth reached + if cur > 0 { + if float64(sample) < float64(cur)*1.25 { + if c.fullBWCount.Add(1) >= 3 && c.fullBW.Load() == 0 { + c.fullBW.Store(cur) + } + } else { + c.fullBWCount.Store(0) + c.fullBW.Store(0) + c.btlbw.Store(max64(cur, sample)) + } + } + } + + // Soft decay of an overly old max (emulates a time window) + // Every BtlBwWindowSec seconds decrease by 10% if no larger samples arrived + winMs := int64(c.opts.BtlBwWindowSec * 1000) + lastDecay := c.lastBtlBwDecay.Load() + if lastDecay == 0 { + lastDecay = now + } + if lastDecay+winMs < 
now && c.lastBtlBwDecay.CompareAndSwap(lastDecay, now) { + decayed := int64(float64(c.btlbw.Load()) * 0.9) + if decayed < c.opts.MinRate { + decayed = c.opts.MinRate + } + c.btlbw.Store(decayed) + } +} + +func (c *BBRv2Controller) InflightAllowance(currentBytes int64) int64 { + if currentBytes <= 0 { + currentBytes = 0 + } + + hi := c.hiInflight.Load() + if hi <= 0 { + hi = c.inflight.Load() + } + if hi <= 0 { + minRtt := c.minRTT.Load() + if minRtt <= 0 { + minRtt = c.opts.DefaultRTTMs + } + pacing := c.pacingRate.Load() + if pacing <= 0 { + pacing = c.opts.MinRate + } + hi = rateToInflight(pacing, minRtt) + } + + allowance := hi - currentBytes + if allowance <= 0 { + return 0 + } + return allowance +} + +func (c *BBRv2Controller) CurrentMinRTT() int64 { + return c.minRTT.Load() +} + +func (c *BBRv2Controller) CurrentRTT() int64 { + return c.lastRTT.Load() +} + +func (c *BBRv2Controller) checkProbeRTT(now int64, ackedBytes int64) { + if c.state.Load() != 3 && now-c.minRTTAt.Load() > c.opts.MinRTTExpiryMs && + !c.appLimited.Load() && ackedBytes > 0 { + + c.state.Store(3) + c.cycleStamp.Store(now) + } + + if c.state.Load() == 3 && now-c.cycleStamp.Load() >= c.opts.ProbeRTTDurationMs { + c.state.Store(2) + c.cycleStamp.Store(now) + c.cycleIndex.Store(0) + } +} + +func (c *BBRv2Controller) updateModelAndRate(now int64) float64 { + state := c.state.Load() + bw := c.btlbw.Load() + if bw <= 0 { + bw = c.opts.MinRate + } + + // Update inflight target = bw * minRTT + inflight := rateToInflight(bw, c.minRTT.Load()) + if inflight <= 0 { + inflight = 2 * 1500 // at least two MSS-equivalents + } + c.inflight.Store(inflight) + + // Losses in the last window → decide whether to lower inflight_hi + var lossRate float64 + lt := c.lossTotal.Swap(0) + ll := c.lossLost.Swap(0) + if lt > 0 { + lossRate = float64(ll) / float64(lt) + } + + c.lastLossRate.Store(math.Float64bits(lossRate)) + + // BBRv2: if loss is high — tighten the upper bound inflight_hi BELOW the model + hi := 
c.hiInflight.Load() + if hi == 0 { + hi = inflight + } + if lossRate >= c.opts.HighLoss { + // multiplicative decrease like BBRv2 + newHi := int64(float64(hi) * c.opts.Beta) + // allow going below the model to drain the queue, but keep a sane floor + floor := max64(2*1500, inflight/2) // >=2*MSS and not below ~0.5*model + if newHi < floor { + newHi = floor + } + c.hiInflight.Store(newHi) + } else { + // Slowly relax upward + relax := hi + max64(inflight/16, 1500) // +~6% or at least one MSS + c.hiInflight.Store(min64(relax, inflight*4)) + } + + // Choose pacing_gain by state + var pacingGain = 1.0 + switch state { + case 0: // Startup + pacingGain = 2.885 // classic BBR startup + // Transition to Drain once "full bandwidth" is reached + if c.fullBW.Load() > 0 { + c.state.Store(1) + c.cycleStamp.Store(now) + } + case 1: // Drain + pacingGain = 1.0 / 2.885 + // Finish drain relatively quickly + if now-c.cycleStamp.Load() >= 200 { + c.state.Store(2) // ProbeBW + c.cycleStamp.Store(now) + c.cycleIndex.Store(0) + } + case 2: // ProbeBW + // Moderate BBRv2 gain cycle: {1.25, 0.75, 1,1,1,1,1,1} + gains := [...]float64{1.25, 0.75, 1, 1, 1, 1, 1, 1} + idx := int(c.cycleIndex.Load()) + if idx < 0 || idx >= len(gains) { + idx = 0 + c.cycleIndex.Store(0) + } + pacingGain = gains[idx] + // Advance the cycle + if now-c.cycleStamp.Load() >= c.opts.ProbeBwCycleMs { + c.cycleStamp.Store(now) + c.cycleIndex.Store(int32((idx + 1) % len(gains))) + } + case 3: // ProbeRTT + pacingGain = 0.5 // send less to probe RTT + } + + // Map inflight_hi into a rate cap (upper bound) + // targetRate = min(bw * pacingGain, hiInflight / minRTT) + targetByGain := float64(bw) * pacingGain + minRtt := max64(c.minRTT.Load(), 1) + hiBytesPerSec := float64(c.hiInflight.Load()) * 1000.0 / float64(minRtt) + target := min64(int64(targetByGain), int64(hiBytesPerSec)) + + prev := c.pacingRate.Load() + + if lossRate >= c.opts.HighLoss { + lossCap := int64(float64(prev) * c.opts.Beta) + if lossCap < 
c.opts.MinRate { + lossCap = c.opts.MinRate + } + if target > lossCap { + target = lossCap + } + } + + // Lower/upper bounds + if target < c.opts.MinRate { + target = c.opts.MinRate + } + if c.opts.MaxRate > 0 && target > c.opts.MaxRate { + target = c.opts.MaxRate + } + + // Smoothing: limit step changes up/down (except during Startup/ProbeRTT) + maxUp := int64(float64(prev) * 1.5) + maxDown := int64(float64(prev) * 0.7) + if state != 0 && state != 3 { // don't limit in Startup/ProbeRTT + if target > maxUp { + target = maxUp + } + if target < maxDown { + target = maxDown + } + } + + if target <= 0 { + target = c.opts.MinRate + } + + if target != prev { + c.pacingRate.Store(target) + c.limiter.SetRate(target) + } + return lossRate +} + +func (c *BBRv2Controller) LastLossSample() (total, lost int64, rate float64) { + total = c.lastSampleTot.Load() + lost = c.lastSampleLos.Load() + rate = math.Float64frombits(c.lastLossRate.Load()) + return +} + +func rateToInflight(rateBytesPerSec int64, rttMs int64) int64 { + if rateBytesPerSec <= 0 { + return 0 + } + if rttMs <= 0 { + rttMs = 1 + } + return int64(float64(rateBytesPerSec) * float64(rttMs) / 1000.0) +} + +func nowMs() int64 { return time.Now().UnixMilli() } + +func max64(a, b int64) int64 { + if a > b { + return a + } + return b +} +func min64(a, b int64) int64 { + if a < b { + return a + } + return b +} diff --git a/adnl/rldp/bbr2_test.go b/adnl/rldp/bbr2_test.go new file mode 100644 index 00000000..9d26efa6 --- /dev/null +++ b/adnl/rldp/bbr2_test.go @@ -0,0 +1,360 @@ +package rldp + +import ( + "math" + "sync" + "testing" + "time" +) + +func waitUntil(t *testing.T, timeout time.Duration, cond func() bool, msg string) { + t.Helper() + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + if cond() { + return + } + time.Sleep(2 * time.Millisecond) + } + t.Fatalf("timeout: %s", msg) +} + +func newBBR(t *testing.T, initRate int64, opts BBRv2Options) (*BBRv2Controller, *TokenBucket) { + t.Helper() + 
tb := NewTokenBucket(initRate, "test-peer") + return NewBBRv2Controller(tb, opts), tb +} + +func TestBBR_StartupIncreasesRate(t *testing.T) { + opts := BBRv2Options{ + MinRate: 20_000, + MaxRate: 0, + DefaultRTTMs: 20, + MinSampleMs: 10, + BtlBwWindowSec: 2, + ProbeBwCycleMs: 50, + ProbeRTTDurationMs: 40, + MinRTTExpiryMs: 5_000, + HighLoss: 0.05, + Beta: 0.85, + } + bbr, tb := newBBR(t, opts.MinRate, opts) + + for i := 0; i < 60; i++ { + bbr.ObserveDelta(100_000, 100_000) + time.Sleep(12 * time.Millisecond) + } + + waitUntil(t, 2*time.Second, func() bool { + return bbr.pacingRate.Load() > opts.MinRate + }, "pacingRate should increase in Startup") + + if tb.GetRate() != bbr.pacingRate.Load() { + t.Fatalf("limiter rate mismatch: tb=%d bbr=%d", tb.GetRate(), bbr.pacingRate.Load()) + } +} + +func TestBBR_HighLossReducesRate(t *testing.T) { + opts := BBRv2Options{ + MinRate: 50_000, + DefaultRTTMs: 25, + MinSampleMs: 10, + BtlBwWindowSec: 2, + ProbeBwCycleMs: 50, + ProbeRTTDurationMs: 50, + MinRTTExpiryMs: 5_000, + HighLoss: 0.05, + Beta: 0.85, + } + bbr, _ := newBBR(t, opts.MinRate, opts) + + for i := 0; i < 12; i++ { + bbr.ObserveDelta(200_000, 200_000) + time.Sleep(12 * time.Millisecond) + } + r1 := bbr.pacingRate.Load() + + for i := 0; i < 8; i++ { + bbr.ObserveDelta(100_000, 80_000) + time.Sleep(12 * time.Millisecond) + } + r2 := bbr.pacingRate.Load() + if r2 >= r1 { + t.Fatalf("expected rate drop on high loss: before=%d after=%d", r1, r2) + } +} + +func TestBBR_ProbeRTT_EnterAndExit(t *testing.T) { + opts := BBRv2Options{ + MinRate: 30_000, + DefaultRTTMs: 10, + MinSampleMs: 10, + BtlBwWindowSec: 2, + ProbeBwCycleMs: 40, + ProbeRTTDurationMs: 30, + MinRTTExpiryMs: 60, + HighLoss: 0.1, + Beta: 0.9, + } + bbr, _ := newBBR(t, opts.MinRate, opts) + + // немного трафика + for i := 0; i < 5; i++ { + bbr.ObserveDelta(50_000, 50_000) + time.Sleep(12 * time.Millisecond) + } + + time.Sleep(70 * time.Millisecond) + for i := 0; i < 10 && bbr.state.Load() != 3; i++ { + 
bbr.ObserveDelta(1_000, 1_000) + time.Sleep(12 * time.Millisecond) + } + if bbr.state.Load() != 3 { + t.Fatalf("should enter ProbeRTT") + } + + for i := 0; i < 20 && bbr.state.Load() != 2; i++ { + bbr.ObserveDelta(1_000, 1_000) + time.Sleep(12 * time.Millisecond) + } + if bbr.state.Load() != 2 { + t.Fatalf("should exit to ProbeBW") + } +} + +func TestBBR_RespectsMinMaxRate(t *testing.T) { + opts := BBRv2Options{ + MinRate: 10_000, + MaxRate: 15_000, + DefaultRTTMs: 10, + MinSampleMs: 10, + ProbeBwCycleMs: 40, + ProbeRTTDurationMs: 40, + MinRTTExpiryMs: 5_000, + HighLoss: 0.05, + Beta: 0.85, + } + bbr, _ := newBBR(t, opts.MinRate, opts) + + for i := 0; i < 25; i++ { + bbr.ObserveDelta(1_000_000, 1_000_000) + time.Sleep(12 * time.Millisecond) + } + if got := bbr.pacingRate.Load(); got > opts.MaxRate { + t.Fatalf("rate exceeded MaxRate: got=%d max=%d", got, opts.MaxRate) + } + + for i := 0; i < 5; i++ { + bbr.ObserveDelta(100_000, 20_000) // 80% loss + time.Sleep(12 * time.Millisecond) + } + if got := bbr.pacingRate.Load(); got < opts.MinRate { + t.Fatalf("rate fell below MinRate: got=%d min=%d", got, opts.MinRate) + } +} + +func TestBBR_SmoothingBounds(t *testing.T) { + opts := BBRv2Options{ + MinRate: 80_000, + DefaultRTTMs: 20, + MinSampleMs: 10, + BtlBwWindowSec: 2, + ProbeBwCycleMs: 50, + ProbeRTTDurationMs: 50, + MinRTTExpiryMs: 5_000, + HighLoss: 0.05, + Beta: 0.85, + } + bbr, _ := newBBR(t, opts.MinRate, opts) + + for i := 0; i < 10; i++ { + bbr.ObserveDelta(500_000, 500_000) + time.Sleep(12 * time.Millisecond) + } + prev := bbr.pacingRate.Load() + + bbr.ObserveDelta(10_000_000, 10_000_000) + time.Sleep(14 * time.Millisecond) + now := bbr.pacingRate.Load() + if now > int64(float64(prev)*1.55) { + t.Fatalf("up-smoothing failed: prev=%d now=%d", prev, now) + } + + bbr.ObserveDelta(1_000_000, 100_000) // 90% loss + time.Sleep(14 * time.Millisecond) + after := bbr.pacingRate.Load() + if after < int64(float64(now)*0.65) { + t.Fatalf("down-smoothing failed: now=%d 
after=%d", now, after) + } +} + +func TestBBR_BtlBwDecay(t *testing.T) { + opts := BBRv2Options{ + MinRate: 10_000, + DefaultRTTMs: 20, + MinSampleMs: 10, + BtlBwWindowSec: 1, + ProbeBwCycleMs: 100, + ProbeRTTDurationMs: 100, + MinRTTExpiryMs: 10_000, + HighLoss: 0.2, + Beta: 0.85, + } + bbr, _ := newBBR(t, opts.MinRate, opts) + + for i := 0; i < 6; i++ { + bbr.ObserveDelta(200_000, 200_000) + time.Sleep(12 * time.Millisecond) + } + peak := bbr.btlbw.Load() + if peak <= opts.MinRate { + t.Fatalf("unexpected peak btlbw: %d", peak) + } + + winMs := int64(opts.BtlBwWindowSec * 1000) + bbr.lastBtlBwDecay.Store(time.Now().UnixMilli() - winMs - 10) + bbr.ObserveDelta(1, 1) + time.Sleep(12 * time.Millisecond) + + decayed := bbr.btlbw.Load() + if !(decayed < peak && decayed >= opts.MinRate) { + t.Fatalf("expected btlbw decay: before=%d after=%d (min=%d)", peak, decayed, opts.MinRate) + } +} + +func TestBBR_LossSampleTracked(t *testing.T) { + opts := BBRv2Options{ + MinRate: 60_000, + DefaultRTTMs: 20, + MinSampleMs: 10, + BtlBwWindowSec: 2, + ProbeBwCycleMs: 40, + ProbeRTTDurationMs: 40, + MinRTTExpiryMs: 5_000, + HighLoss: 0.05, + Beta: 0.85, + } + bbr, _ := newBBR(t, opts.MinRate, opts) + + time.Sleep(12 * time.Millisecond) + bbr.ObserveDelta(400_000, 200_000) + + total, lost, rate := bbr.LastLossSample() + if total <= 0 || lost <= 0 { + t.Fatalf("expected non-zero loss sample, got total=%d lost=%d", total, lost) + } + + expected := float64(lost) / float64(total) + if math.Abs(rate-expected) > 1e-3 { + t.Fatalf("loss rate mismatch: want %.4f got %.4f (total=%d lost=%d)", expected, rate, total, lost) + } +} + +func TestBBR_InflightAllowance(t *testing.T) { + opts := BBRv2Options{ + MinRate: 40_000, + DefaultRTTMs: 20, + MinSampleMs: 10, + } + bbr, _ := newBBR(t, opts.MinRate, opts) + + bbr.hiInflight.Store(60_000) + if got := bbr.InflightAllowance(30_000); got != 30_000 { + t.Fatalf("unexpected allowance: %d", got) + } + + if got := bbr.InflightAllowance(80_000); got != 0 
{ + t.Fatalf("allowance should clamp at zero, got %d", got) + } + + bbr.hiInflight.Store(0) + bbr.inflight.Store(45_000) + if got := bbr.InflightAllowance(5_000); got != 40_000 { + t.Fatalf("fallback allowance mismatch: %d", got) + } +} + +func approxI64(a, b, tol int64) bool { + d := a - b + if d < 0 { + d = -d + } + return d <= tol +} + +func TestSendClock_OnSendAndSentAt(t *testing.T) { + sc := NewSendClock(1024) // power of two + seq := uint32(42) + + now := time.Now().UnixMilli() + sc.OnSend(seq, now) + + got, ok := sc.SentAt(seq) + if !ok { + t.Fatalf("SentAt(%d) ok=false, want true", seq) + } + // допускаем небольшую разницу (квант времени) + if !approxI64(got, now, 3) { + t.Fatalf("SentAt ms mismatch: got=%d want~=%d", got, now) + } +} + +func TestSendClock_CollisionOverwritesOld(t *testing.T) { + sc := NewSendClock(8) // mask=7 + base := time.Now().UnixMilli() + + seq1 := uint32(10) // 10 & 7 = 2 + seq2 := uint32(18) // 18 & 7 = 2 + + sc.OnSend(seq1, base+1) + sc.OnSend(seq2, base+2) + + if _, ok := sc.SentAt(seq1); ok { + t.Fatalf("expected collision overwrite for seq1=%d", seq1) + } + got2, ok2 := sc.SentAt(seq2) + if !ok2 || got2 != base+2 { + t.Fatalf("seq2 not found or bad ts: ok=%v got=%d want=%d", ok2, got2, base+2) + } +} + +func TestSendClock_RecentWindowVisibleAfterWrap(t *testing.T) { + capPow2 := 16 + sc := NewSendClock(capPow2) + start := time.Now().UnixMilli() + + for i := 0; i < 1000; i++ { + sc.OnSend(uint32(i), start+int64(i)) + } + for i := 1000 - capPow2; i < 1000; i++ { + if _, ok := sc.SentAt(uint32(i)); !ok { + t.Fatalf("recent seq=%d not found", i) + } + } +} + +func TestSendClock_ConcurrentRaces(t *testing.T) { + sc := NewSendClock(4096) + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + base := time.Now().UnixMilli() + for i := uint32(1); i < 50000; i++ { + sc.OnSend(i, base+int64(i%5000)) + } + }() + + for r := 0; r < 4; r++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := uint32(1); i < 50000; i++ { + 
sc.SentAt(i) + } + }() + } + + wg.Wait() +} diff --git a/adnl/rldp/bucket.go b/adnl/rldp/bucket.go index 2c9ef847..61a7b9b0 100644 --- a/adnl/rldp/bucket.go +++ b/adnl/rldp/bucket.go @@ -5,36 +5,57 @@ import ( "time" ) +// TokenBucket bytes/sec type TokenBucket struct { ratePerSec int64 capacity int64 - tokens int64 - lastRefill int64 + + lastRefill int64 // UnixMicro peerName string } -func NewTokenBucket(rate int64, peerName string) *TokenBucket { +// NewTokenBucket create bucket with bytes/sec. +func NewTokenBucket(bps int64, peerName string) *TokenBucket { + if bps < 1 { + bps = 1 + } + x := bps * 1000 // burst 1 sec + return &TokenBucket{ - ratePerSec: rate * 1000, - capacity: rate * 1000, - tokens: rate * 1000, + ratePerSec: x, + capacity: x, + tokens: x, lastRefill: time.Now().UnixMicro(), peerName: peerName, } } -func (tb *TokenBucket) SetRate(pps int64) { - if pps < 50 { - pps = 50 - } else if pps > 5000000 { - pps = 5000000 +func (tb *TokenBucket) SetCapacityBytes(burstBytes int64) { + if burstBytes < 0 { + burstBytes = 0 + } + atomic.StoreInt64(&tb.capacity, burstBytes*1000) +} + +func (tb *TokenBucket) SetRate(bps int64) { + if bps < 8<<10 { // 8KB/s + bps = 8 << 10 + } else if bps > 500<<20 { // 500 MB/s + bps = 500 << 20 } + atomic.StoreInt64(&tb.ratePerSec, bps*1000) - atomic.StoreInt64(&tb.ratePerSec, pps*1000) - atomic.StoreInt64(&tb.capacity, pps*1000) - Logger("[RLDP] Peer rate updated:", tb.peerName, pps) + curCap := atomic.LoadInt64(&tb.capacity) + curRate := atomic.LoadInt64(&tb.ratePerSec) + + // if cap ~= old rate, use new + if abs64(curCap-curRate) < curRate/64 { // ~1.5% + atomic.StoreInt64(&tb.capacity, curRate) + } + + Logger("[RLDP] Peer pacing updated (Bps):", tb.peerName, bps) } func (tb *TokenBucket) GetRate() int64 { @@ -45,7 +66,12 @@ func (tb *TokenBucket) GetTokensLeft() int64 { return atomic.LoadInt64(&tb.tokens) / 1000 } -func (tb *TokenBucket) TryConsume() bool { +func (tb *TokenBucket) ConsumeUpTo(maxBytes int) int { + if 
maxBytes <= 0 { + return 0 + } + req := int64(maxBytes) + for { now := time.Now().UnixMicro() last := atomic.LoadInt64(&tb.lastRefill) @@ -53,21 +79,58 @@ func (tb *TokenBucket) TryConsume() bool { if elapsed > 0 { add := (elapsed * atomic.LoadInt64(&tb.ratePerSec)) / 1_000_000 - newTokens := atomic.LoadInt64(&tb.tokens) + add - if capacity := atomic.LoadInt64(&tb.capacity); newTokens > capacity { - newTokens = capacity - } - if atomic.CompareAndSwapInt64(&tb.lastRefill, last, now) { - atomic.StoreInt64(&tb.tokens, newTokens) + if add > 0 && atomic.CompareAndSwapInt64(&tb.lastRefill, last, now) { + for { + curr := atomic.LoadInt64(&tb.tokens) + newTokens := curr + add + capacity := atomic.LoadInt64(&tb.capacity) + if newTokens > capacity { + newTokens = capacity + } + + if atomic.CompareAndSwapInt64(&tb.tokens, curr, newTokens) { + break + } + } } } - if currTokens := atomic.LoadInt64(&tb.tokens); currTokens >= 1000 { // micro-tokens - if !atomic.CompareAndSwapInt64(&tb.tokens, currTokens, currTokens-1000) { - continue - } - return true + currTokens := atomic.LoadInt64(&tb.tokens) + availableBytes := currTokens / 1000 + if availableBytes <= 0 { + return 0 + } + + toConsume := req + if availableBytes < toConsume { + toConsume = availableBytes } - return false + + micro := toConsume * 1000 + if atomic.CompareAndSwapInt64(&tb.tokens, currTokens, currTokens-micro) { + return int(toConsume) + } + + // race, repeat + } +} + +func (tb *TokenBucket) ConsumePackets(maxPackets, partSize int) int { + if maxPackets <= 0 || partSize <= 0 { + return 0 + } + wantBytes := int64(maxPackets) * int64(partSize) + gotBytes := tb.ConsumeUpTo(int(wantBytes)) + return gotBytes / partSize +} + +func (tb *TokenBucket) TryConsumeBytes(n int) bool { + return tb.ConsumeUpTo(n) == n +} + +func abs64(x int64) int64 { + if x < 0 { + return -x } + return x } diff --git a/adnl/rldp/bucket_test.go b/adnl/rldp/bucket_test.go new file mode 100644 index 00000000..41da5eb8 --- /dev/null +++ 
b/adnl/rldp/bucket_test.go @@ -0,0 +1,150 @@ +package rldp + +import ( + "sync" + "sync/atomic" + "testing" + "time" +) + +func wait(ms int) { time.Sleep(time.Duration(ms) * time.Millisecond) } + +func TestTokenBucket_Init(t *testing.T) { + tb := NewTokenBucket(100_000, "peer") // 100 KB/s + if got := tb.GetRate(); got != 100_000 { + t.Fatalf("GetRate=%d want=100000", got) + } + + if got := tb.GetTokensLeft(); got != 100_000 { + t.Fatalf("GetTokensLeft init=%d want=100000", got) + } +} + +func TestTokenBucket_Consume_And_Exhaust(t *testing.T) { + tb := NewTokenBucket(10_000, "peer") // 10 KB/s + if n := tb.ConsumeUpTo(4_000); n != 4_000 { + t.Fatalf("ConsumeUpTo 4k -> %d want 4000", n) + } + if left := tb.GetTokensLeft(); left != 6_000 { + t.Fatalf("left=%d want 6000", left) + } + + if n := tb.ConsumeUpTo(7_000); n != 6_000 { + t.Fatalf("ConsumeUpTo 7k -> %d want 6000 (cap by available)", n) + } + + if left := tb.GetTokensLeft(); left != 0 { + t.Fatalf("left after exhaust=%d want 0", left) + } + + if n := tb.ConsumeUpTo(1_000); n != 0 { + t.Fatalf("ConsumeUpTo when empty -> %d want 0", n) + } +} + +func TestTokenBucket_RefillOverTime(t *testing.T) { + tb := NewTokenBucket(10_000, "peer") // 10 KB/s + _ = tb.ConsumeUpTo(10_000) + if left := tb.GetTokensLeft(); left != 0 { + t.Fatalf("left=%d want 0", left) + } + + wait(120) + + n := tb.ConsumeUpTo(10_000) + if n < 800 || n > 2_000 { + t.Fatalf("refill bytes=%d want around 1200 (800..2000)", n) + } +} + +func TestTokenBucket_SetRate_DownAndUp(t *testing.T) { + tb := NewTokenBucket(40_000, "peer") // 40 KB/s + _ = tb.ConsumeUpTo(40_000) + tb.SetRate(10_000) + wait(110) + n := tb.ConsumeUpTo(10_000) + if n < 700 || n > 2_000 { + t.Fatalf("after downrate refill=%d want ~1100 (700..2000)", n) + } + + tb.SetRate(200_000) + _ = tb.ConsumeUpTo(200_000) + wait(100) // ~20_000 B + n2 := tb.ConsumeUpTo(1_000_000) + if n2 < 12_000 || n2 > 40_000 { + t.Fatalf("after uprate refill=%d want ~20000 (12000..40000)", n2) + } +} + 
+func TestTokenBucket_SetCapacityBytes_Burst(t *testing.T) { + tb := NewTokenBucket(50_000, "peer") + tb.SetCapacityBytes(10_000) + + _ = tb.ConsumeUpTo(1_000_000) + wait(250) + got := tb.ConsumeUpTo(1_000_000) + if got < 9_000 || got > 10_000 { + t.Fatalf("burst-capped consume=%d want ~10k (9000..10000)", got) + } +} + +func TestTokenBucket_ConsumePackets(t *testing.T) { + tb := NewTokenBucket(12_000, "peer") // 12 kB/s + gotPk := tb.ConsumePackets(100, 1_200) + if gotPk != 10 { // 12k / 1.2k = 10 + t.Fatalf("ConsumePackets first=%d want 10", gotPk) + } + + wait(105) + + gotPk2 := tb.ConsumePackets(100, 1_200) + if gotPk2 != 1 { + t.Fatalf("ConsumePackets after refill=%d want 1", gotPk2) + } +} + +func TestTokenBucket_TryConsumeBytes(t *testing.T) { + tb := NewTokenBucket(5_000, "peer") + if ok := tb.TryConsumeBytes(512); !ok { + t.Fatalf("TryConsumeBytes(512) = false, want true") + } + + left := tb.GetTokensLeft() + if left < 4_400 || left > 4_500 { + t.Fatalf("left ~ 4488.., got %d", left) + } + + if ok := tb.TryConsumeBytes(10_000); ok { + t.Fatalf("TryConsumeBytes big should be false") + } +} + +func TestTokenBucket_ParallelConsume_NoOveruse(t *testing.T) { + tb := NewTokenBucket(100_000, "peer") // 100k B/s + testDur := 100 * time.Millisecond + start := time.Now() + var consumed atomic.Int64 + + wg := sync.WaitGroup{} + workers := 8 + wg.Add(workers) + for w := 0; w < workers; w++ { + go func() { + defer wg.Done() + for time.Since(start) < testDur { + n := tb.ConsumeUpTo(1_500) + if n > 0 { + consumed.Add(int64(n)) + } else { + time.Sleep(200 * time.Microsecond) + } + } + }() + } + wg.Wait() + + got := consumed.Load() + if got > 120_000 { + t.Fatalf("parallel consumed=%d exceeds expected ~<=120000", got) + } +} diff --git a/adnl/rldp/client.go b/adnl/rldp/client.go index af65c137..c88edfa5 100644 --- a/adnl/rldp/client.go +++ b/adnl/rldp/client.go @@ -7,14 +7,17 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/xssnick/raptorq" - 
"github.com/xssnick/tonutils-go/adnl" - "github.com/xssnick/tonutils-go/tl" - "math/bits" + "github.com/xssnick/tonutils-go/adnl/rldp/roundrobin" + "math" "reflect" + "sort" "sync" "sync/atomic" "time" + + "github.com/xssnick/raptorq" + "github.com/xssnick/tonutils-go/adnl" + "github.com/xssnick/tonutils-go/tl" ) type ADNL interface { @@ -30,21 +33,36 @@ type ADNL interface { var Logger = func(a ...any) {} -var PartSize = uint32(1 << 20) +var PartSize = uint32(256 << 10) + +var MultiFECMode = false // TODO: activate after some versions +var RoundRobinFECLimit = 50 * DefaultSymbolSize + +type fecEncoder interface { + GenSymbol(id uint32) []byte +} type activeTransferPart struct { - encoder *raptorq.Encoder + encoder fecEncoder seqno uint32 index uint32 - feq FECRaptorQ + fec FEC + fecSymbolSize uint32 + fecSymbolsCount uint32 + lastConfirmRecvProcessed uint32 lastConfirmSeqnoProcessed uint32 - lastConfirmSeqno uint32 lastConfirmAt int64 lastRecoverAt int64 nextRecoverDelay int64 + fastSeqnoTill uint32 + recoveryReady atomic.Int32 + lossEWMA atomic.Uint64 + drrDeficit int64 + + sendClock *SendClock transfer *activeTransfer } @@ -52,10 +70,12 @@ type activeTransferPart struct { type activeTransfer struct { id []byte data []byte + totalSize uint64 timeoutAt int64 - currentPart atomic.Pointer[activeTransferPart] - rldp *RLDP + nextPartIndex uint32 + currentPart atomic.Pointer[activeTransferPart] + rldp *RLDP mx sync.Mutex } @@ -72,7 +92,7 @@ type expectedTransfer struct { type RLDP struct { adnl ADNL - useV2 bool + useV2 atomic.Int32 activateRecoverySender chan bool activeRequests map[string]*activeRequest @@ -91,18 +111,24 @@ type RLDP struct { packetsSz uint64 rateLimit *TokenBucket - - lastNetworkProcessAt int64 - lastNetworkPacketsRecv int64 - lastNetworkBatchesNum int64 + rateCtrl *BBRv2Controller lastReport time.Time } +type fecDecoder interface { + AddSymbol(id uint32, data []byte) (bool, error) + Decode() (bool, []byte, error) +} + type decoderStreamPart struct 
{ index uint32 - decoder *raptorq.Decoder + decoder fecDecoder + + fecDataSize uint32 + fecSymbolSize uint32 + fecSymbolsCount uint32 lastCompleteSeqno uint32 maxSeqno uint32 @@ -119,21 +145,26 @@ type decoderStream struct { lastMessageAt time.Time currentPart decoderStreamPart - messages chan *MessagePart + /// messages chan *MessagePart + msgBuf *Queue totalSize uint64 - buf []byte + + parts [][]byte + partsSize uint64 mx sync.Mutex } -var MaxUnexpectedTransferSize uint64 = 1 << 16 // 64 KB -var MaxFECDataSize uint64 = 2 << 20 // 2 MB -var DefaultFECDataSize uint64 = 1 << 20 // 1 MB +var MaxUnexpectedTransferSize uint64 = 64 << 10 // 64 KB +var MaxFECDataSize uint32 = 2 << 20 // 2 MB var DefaultSymbolSize uint32 = 768 const _MTU = 1 << 37 +var MinRateBytesSec = int64(1 << 20) +var MaxRateBytesSec = int64(512 << 20) + func NewClient(a ADNL) *RLDP { r := &RLDP{ adnl: a, @@ -142,9 +173,23 @@ func NewClient(a ADNL) *RLDP { recvStreams: map[string]*decoderStream{}, expectedTransfers: map[string]*expectedTransfer{}, activateRecoverySender: make(chan bool, 1), - rateLimit: NewTokenBucket(3000, a.RemoteAddr()), + rateLimit: NewTokenBucket(1<<20, a.RemoteAddr()), } + r.rateCtrl = NewBBRv2Controller(r.rateLimit, BBRv2Options{ + Name: r.adnl.RemoteAddr(), + MinRate: MinRateBytesSec, + MaxRate: MaxRateBytesSec, + HighLoss: 0.25, + Beta: 0.9, + DefaultRTTMs: 25, + BtlBwWindowSec: 10, + ProbeBwCycleMs: 200, + ProbeRTTDurationMs: 200, + MinRTTExpiryMs: 20_000, + MinSampleMs: 50, + }) + a.SetCustomMessageHandler(r.handleMessage) go r.recoverySender() @@ -153,7 +198,7 @@ func NewClient(a ADNL) *RLDP { func NewClientV2(a ADNL) *RLDP { c := NewClient(a) - c.useV2 = true + c.useV2.Store(1) return c } @@ -198,13 +243,15 @@ func (r *RLDP) handleMessage(msg *adnl.MessageCustom) error { isV2 = false } + prevUseV2 := r.useV2.Load() == 1 + if isV2 && !prevUseV2 { + r.useV2.Store(1) + } else if !isV2 && prevUseV2 { + r.useV2.Store(0) + } + switch m := msg.Data.(type) { case MessagePart: - 
fec, ok := m.FecType.(FECRaptorQ) - if !ok { - return fmt.Errorf("not supported fec type") - } - tm := time.Now() id := string(m.TransferID) @@ -228,18 +275,18 @@ func (r *RLDP) handleMessage(msg *adnl.MessageCustom) error { return fmt.Errorf("too big transfer size %d, max allowed %d", m.TotalSize, maxTransferSize) } - if m.TotalSize < uint64(fec.DataSize) { - return fmt.Errorf("bad rldp total size %d, expected at least %d", m.TotalSize, fec.DataSize) + qsz := int(m.FecType.GetSymbolsCount()) + 3 + if qsz > 1024 { + qsz = 1024 } stream = &decoderStream{ lastMessageAt: tm, - messages: make(chan *MessagePart, 256), + msgBuf: NewQueue(qsz), currentPart: decoderStreamPart{ index: 0, }, totalSize: m.TotalSize, - buf: make([]byte, 0, m.TotalSize), } r.mx.Lock() @@ -252,11 +299,8 @@ func (r *RLDP) handleMessage(msg *adnl.MessageCustom) error { r.mx.Unlock() } - select { - case stream.messages <- &m: - // put message to queue in case it will be locked by other processor - default: - } + // put a message to queue in case it will be locked by another processor + stream.msgBuf.Enqueue(&m) if !stream.mx.TryLock() { return nil @@ -264,202 +308,242 @@ func (r *RLDP) handleMessage(msg *adnl.MessageCustom) error { defer stream.mx.Unlock() for { - var part *MessagePart - select { - case part = <-stream.messages: - default: + part, ok := stream.msgBuf.Dequeue() + if !ok { return nil } - if stream.finishedAt != nil || stream.currentPart.index > part.Part { - if stream.currentPart.lastCompleteAt.Add(10 * time.Millisecond).Before(tm) { // to not send completions too often - var complete tl.Serializable = Complete{ - TransferID: part.TransferID, - Part: part.Part, + err := func() error { + if stream.finishedAt != nil || stream.currentPart.index > part.Part { + if stream.currentPart.lastCompleteAt.Add(10 * time.Millisecond).Before(tm) { // to not send completions too often + var complete tl.Serializable = Complete{ + TransferID: part.TransferID, + Part: part.Part, + } + + if isV2 { + 
complete = CompleteV2(complete.(Complete)) + } + + // got packet for a finished part, let them know that it is completed, again + err := r.adnl.SendCustomMessage(context.Background(), complete) + if err != nil { + return fmt.Errorf("failed to send rldp complete message: %w", err) + } + stream.currentPart.lastCompleteAt = tm } + return nil + } - if isV2 { - complete = CompleteV2(complete.(Complete)) + if part.Part > stream.currentPart.index { + return fmt.Errorf("received out of order part %d, expected %d", part.Part, stream.currentPart.index) + } + if part.TotalSize != stream.totalSize { + return fmt.Errorf("received part with bad total size %d, expected %d", part.TotalSize, stream.totalSize) + } + + if stream.currentPart.decoder == nil { + var decoderType uint32 + switch m.FecType.(type) { + case FECRaptorQ: + decoderType = 0 + case FECRoundRobin: + decoderType = 1 + default: + return fmt.Errorf("not supported fec type") } - // got packet for a finished part, let them know that it is completed, again - // TODO: just mark to auto send later? 
- err := r.adnl.SendCustomMessage(context.Background(), complete) - if err != nil { - return fmt.Errorf("failed to send rldp complete message: %w", err) + if m.TotalSize < uint64(m.FecType.GetDataSize()) { + return fmt.Errorf("bad rldp total size %d, expected at least %d", m.TotalSize, m.FecType.GetDataSize()) } - stream.currentPart.lastCompleteAt = tm - } - return nil - } - if part.Part > stream.currentPart.index { - return fmt.Errorf("received out of order part %d, expected %d", part.Part, stream.currentPart.index) - } - if part.TotalSize != stream.totalSize { - return fmt.Errorf("received part with bad total size %d, expected %d", part.TotalSize, stream.totalSize) - } + if uint64(m.FecType.GetDataSize()) > stream.totalSize || m.FecType.GetDataSize() > MaxFECDataSize || + m.FecType.GetSymbolSize() == 0 || m.FecType.GetSymbolsCount() == 0 { + return fmt.Errorf("invalid fec") + } - if stream.currentPart.decoder == nil { - fec, ok := part.FecType.(FECRaptorQ) - if !ok { - return fmt.Errorf("not supported fec type in part: %d", part.Part) - } + var err error + var dec fecDecoder + if decoderType == 0 { + dec, err = raptorq.NewRaptorQ(m.FecType.GetSymbolSize()).CreateDecoder(m.FecType.GetDataSize()) + if err != nil { + return fmt.Errorf("failed to init raptorq decoder: %w", err) + } + } else { + dec, err = roundrobin.NewDecoder(m.FecType.GetSymbolSize(), m.FecType.GetDataSize()) + if err != nil { + return fmt.Errorf("failed to init round robin decoder: %w", err) + } + } + + stream.currentPart.fecDataSize = m.FecType.GetDataSize() + stream.currentPart.fecSymbolSize = m.FecType.GetSymbolSize() + stream.currentPart.fecSymbolsCount = m.FecType.GetSymbolsCount() + stream.currentPart.decoder = dec - if uint64(fec.DataSize) > stream.totalSize || fec.DataSize > uint32(MaxFECDataSize) || - fec.SymbolSize == 0 || fec.SymbolsCount == 0 { - return fmt.Errorf("invalid fec") + Logger("[ID]", hex.EncodeToString(part.TransferID), "[RLDP] created decoder for part:", part.Part, "data 
size:", stream.currentPart.fecDataSize, "symbol size:", stream.currentPart.fecSymbolSize, "symbols:", stream.currentPart.fecSymbolsCount) } - dec, err := raptorq.NewRaptorQ(fec.SymbolSize).CreateDecoder(fec.DataSize) + canTryDecode, err := stream.currentPart.decoder.AddSymbol(part.Seqno, part.Data) if err != nil { - return fmt.Errorf("failed to init raptorq decoder: %w", err) + return fmt.Errorf("failed to add raptorq symbol %d: %w", part.Seqno, err) } - stream.currentPart.decoder = dec - Logger("[ID]", hex.EncodeToString(part.TransferID), "[RLDP] created decoder for part:", part.Part, "data size:", fec.DataSize, "symbol size:", fec.SymbolSize, "symbols:", fec.SymbolsCount) - } - canTryDecode, err := stream.currentPart.decoder.AddSymbol(part.Seqno, part.Data) - if err != nil { - return fmt.Errorf("failed to add raptorq symbol %d: %w", part.Seqno, err) - } + stream.lastMessageAt = tm + stream.currentPart.receivedNum++ + + if canTryDecode { + tmd := time.Now() + decoded, data, err := stream.currentPart.decoder.Decode() + if err != nil { + return fmt.Errorf("failed to decode raptorq packet: %w", err) + } - stream.lastMessageAt = tm - stream.currentPart.receivedNum++ + // it may not be decoded due to an unsolvable math system, it means we need more symbols + if decoded { + Logger("[RLDP] v2:", isV2, "part", part.Part, "decoded on seqno", part.Seqno, "symbols:", stream.currentPart.fecSymbolsCount, "decode took", time.Since(tmd).String()) - if canTryDecode { - tmd := time.Now() - decoded, data, err := stream.currentPart.decoder.Decode() - if err != nil { - return fmt.Errorf("failed to decode raptorq packet: %w", err) - } + stream.currentPart = decoderStreamPart{ + index: stream.currentPart.index + 1, + } - // it may not be decoded due to unsolvable math system, it means we need more symbols - if decoded { - Logger("[RLDP] part", part.Part, "decoded on seqno", part.Seqno, "symbols:", fec.SymbolsCount, "decode took", time.Since(tmd).String()) + if len(data) > 0 { + 
stream.parts = append(stream.parts, data) + stream.partsSize += uint64(len(data)) + } - stream.currentPart = decoderStreamPart{ - index: stream.currentPart.index + 1, - } - stream.buf = append(stream.buf, data...) + var complete tl.Serializable = Complete{ + TransferID: part.TransferID, + Part: part.Part, + } - var complete tl.Serializable = Complete{ - TransferID: part.TransferID, - Part: part.Part, - } + // drop unprocessed messages related to this part + stream.msgBuf.Drain() - if isV2 { - complete = CompleteV2(complete.(Complete)) - } - _ = r.adnl.SendCustomMessage(context.Background(), complete) + if isV2 { + complete = CompleteV2(complete.(Complete)) + } + _ = r.adnl.SendCustomMessage(context.Background(), complete) - if uint64(len(stream.buf)) >= stream.totalSize { - stream.finishedAt = &tmd + if stream.partsSize >= stream.totalSize { + stream.finishedAt = &tmd + stream.currentPart.decoder = nil - r.mx.Lock() - if len(r.recvStreams) > 100 { + r.mx.Lock() for sID, s := range r.recvStreams { - // remove streams that was finished more than 30 sec ago or when it was no messages for more than 60 seconds. - if s.lastMessageAt.Add(60*time.Second).Before(tm) || - (s.finishedAt != nil && s.finishedAt.Add(30*time.Second).Before(tm)) { + // remove streams that was finished more than 15 sec ago or when it was no messages for more than 30 seconds. 
+ if s.lastMessageAt.Add(30*time.Second).Before(tm) || + (s.finishedAt != nil && s.finishedAt.Add(15*time.Second).Before(tm)) { delete(r.recvStreams, sID) } } - } - r.mx.Unlock() + r.mx.Unlock() - if uint64(len(stream.buf)) > stream.totalSize { - return fmt.Errorf("received more data than expected, expected %d, got %d", stream.totalSize, len(stream.buf)) - } + if stream.partsSize > stream.totalSize { + return fmt.Errorf("received more data than expected, expected %d, got %d", stream.totalSize, stream.partsSize) + } + buf := make([]byte, stream.totalSize) + off := 0 + for _, p := range stream.parts { + off += copy(buf[off:], p) + } + stream.parts = nil + stream.partsSize = 0 - var res any - if _, err = tl.Parse(&res, stream.buf, true); err != nil { - return fmt.Errorf("failed to parse custom message: %w", err) - } + var res any + if _, err = tl.Parse(&res, buf, true); err != nil { + return fmt.Errorf("failed to parse custom message: %w", err) + } - Logger("[RLDP] stream finished and parsed, processing transfer data", hex.EncodeToString(part.TransferID)) + Logger("[RLDP] stream finished and parsed, processing transfer data", hex.EncodeToString(part.TransferID)) - switch rVal := res.(type) { - case Query: - handler := r.onQuery - if handler != nil { - transferId := make([]byte, 32) - copy(transferId, part.TransferID) + switch rVal := res.(type) { + case Query: + handler := r.onQuery + if handler != nil { + transferId := make([]byte, 32) + copy(transferId, part.TransferID) - if err = handler(transferId, &rVal); err != nil { - Logger("failed to handle query: ", err) + if err = handler(transferId, &rVal); err != nil { + Logger("failed to handle query: ", err) + } } - } - case Answer: - qid := string(rVal.ID) - - r.mx.Lock() - req := r.activeRequests[qid] - if req != nil { - delete(r.activeRequests, qid) - delete(r.expectedTransfers, id) - } - r.mx.Unlock() + case Answer: + qid := string(rVal.ID) + + r.mx.Lock() + req := r.activeRequests[qid] + if req != nil { + 
delete(r.activeRequests, qid) + delete(r.expectedTransfers, id) + } + r.mx.Unlock() - if req != nil { - queryId := make([]byte, 32) - copy(queryId, rVal.ID) + if req != nil { + queryId := make([]byte, 32) + copy(queryId, rVal.ID) - // if channel is full we sacrifice processing speed, responses better - req.result <- AsyncQueryResult{ - QueryID: queryId, - Result: rVal.Data, + // if a channel is full, we sacrifice processing speed, responses better + req.result <- AsyncQueryResult{ + QueryID: queryId, + Result: rVal.Data, + } } + default: + Logger("[RLDP] skipping unwanted rldp message of type", reflect.TypeOf(res).String()) } - default: - Logger("[RLDP] skipping unwanted rldp message of type", reflect.TypeOf(res).String()) } + return nil + } else { + Logger("[RLDP] part ", part.Part, "decode attempt failure on seqno", part.Seqno, "symbols:", stream.currentPart.fecSymbolsCount, "decode took", time.Since(tmd).String()) } - return nil - } else { - Logger("[RLDP] part ", part.Part, "decode attempt failure on seqno", part.Seqno, "symbols:", fec.SymbolsCount, "decode took", time.Since(tmd).String()) } - } - if part.Seqno > stream.currentPart.maxSeqno { - diff := part.Seqno - stream.currentPart.maxSeqno - if diff >= 32 { - stream.currentPart.receivedMask = 0 - } else { - stream.currentPart.receivedMask <<= diff + if part.Seqno > stream.currentPart.maxSeqno { + diff := part.Seqno - stream.currentPart.maxSeqno + if diff >= 32 { + stream.currentPart.receivedMask = 0 + } else { + stream.currentPart.receivedMask <<= diff + } + stream.currentPart.maxSeqno = part.Seqno } - stream.currentPart.maxSeqno = part.Seqno - } - if offset := stream.currentPart.maxSeqno - part.Seqno; offset < 32 { - stream.currentPart.receivedMask |= 1 << offset - } + if offset := stream.currentPart.maxSeqno - part.Seqno; offset < 32 { + stream.currentPart.receivedMask |= 1 << offset + } - // send confirm for each 10 packets or after 20 ms - if 
stream.currentPart.receivedNum-stream.currentPart.receivedNumConfirmed >= 10 || - stream.currentPart.lastConfirmAt.Add(20*time.Millisecond).Before(tm) { - var confirm tl.Serializable - if isV2 { - confirm = ConfirmV2{ - TransferID: part.TransferID, - Part: part.Part, - MaxSeqno: stream.currentPart.maxSeqno, - ReceivedMask: stream.currentPart.receivedMask, - ReceivedCount: stream.currentPart.receivedNum, + // send confirm for each 10 packets or after 20 ms + if stream.currentPart.receivedNum-stream.currentPart.receivedNumConfirmed >= 10 || + stream.currentPart.lastConfirmAt.Add(20*time.Millisecond).Before(tm) { + var confirm tl.Serializable + if isV2 { + confirm = ConfirmV2{ + TransferID: part.TransferID, + Part: part.Part, + MaxSeqno: stream.currentPart.maxSeqno, + ReceivedMask: stream.currentPart.receivedMask, + ReceivedCount: stream.currentPart.receivedNum, + } + } else { + confirm = Confirm{ + TransferID: part.TransferID, + Part: part.Part, + Seqno: stream.currentPart.maxSeqno, + } } - } else { - confirm = Confirm{ - TransferID: part.TransferID, - Part: part.Part, - Seqno: stream.currentPart.maxSeqno, + // we don't care in case of error, not so critical + err = r.adnl.SendCustomMessage(context.Background(), confirm) + if err == nil { + stream.currentPart.receivedNumConfirmed = stream.currentPart.receivedNum + stream.currentPart.lastConfirmAt = tm } } - // we don't care in case of error, not so critical - err = r.adnl.SendCustomMessage(context.Background(), confirm) - if err == nil { - stream.currentPart.receivedNumConfirmed = stream.currentPart.receivedNum - stream.currentPart.lastConfirmAt = tm - } + + return nil + }() + if err != nil { + Logger("[RLDP] transfer", hex.EncodeToString(part.TransferID), "process msg part:", part.Part, "error:", err.Error()) } } case Complete: // receiver has fully received transfer part, send new part or close our stream if done @@ -512,59 +596,101 @@ func (r *RLDP) handleMessage(msg *adnl.MessageCustom) error { } part := 
t.getCurrentPart() - if part == nil { + if part == nil || part.index != m.Part { break } - for { // guaranteed replace to higher val - if oldSeq := atomic.LoadUint32(&part.lastConfirmSeqno); oldSeq < m.MaxSeqno { - if !atomic.CompareAndSwapUint32(&part.lastConfirmSeqno, oldSeq, m.MaxSeqno) { + if isV2 { + for { + prevSeq := atomic.LoadUint32(&part.lastConfirmSeqnoProcessed) + prevRecv := atomic.LoadUint32(&part.lastConfirmRecvProcessed) + + advancedSeq := m.MaxSeqno > prevSeq + advancedRecv := m.ReceivedCount > prevRecv + if !advancedSeq && !advancedRecv { + break + } + + if advancedSeq && !atomic.CompareAndSwapUint32(&part.lastConfirmSeqnoProcessed, prevSeq, m.MaxSeqno) { continue } - // replaced - lastProc := atomic.LoadUint32(&part.lastConfirmSeqnoProcessed) - if isV2 && lastProc+32 <= m.MaxSeqno && - atomic.CompareAndSwapUint32(&part.lastConfirmSeqnoProcessed, lastProc, m.MaxSeqno) { + if advancedRecv { + atomic.StoreUint32(&part.lastConfirmRecvProcessed, m.ReceivedCount) + } - nowMs := time.Now().UnixMilli() + var seqDelta int64 + if advancedSeq { + seqDelta = int64(m.MaxSeqno - prevSeq) + } - lastAt := atomic.LoadInt64(&r.lastNetworkProcessAt) - packetsRecv := atomic.AddInt64(&r.lastNetworkPacketsRecv, int64(bits.OnesCount32(m.ReceivedMask))) - batches := atomic.AddInt64(&r.lastNetworkBatchesNum, 1) + var recvDelta int64 + if advancedRecv { + recvDelta = int64(m.ReceivedCount - prevRecv) + } + + if seqDelta < 0 { + seqDelta = 0 + } + if recvDelta < 0 { + recvDelta = 0 + } + + totalDelta := seqDelta + if totalDelta < recvDelta { + totalDelta = recvDelta + } + if totalDelta <= 0 { + break + } + + if tms, ok := part.sendClock.SentAt(m.MaxSeqno); ok { + r.rateCtrl.ObserveRTT(time.Now().UnixMilli() - tms) + } - rate := r.rateLimit.GetRate() + r.rateCtrl.ObserveDelta(totalDelta*int64(part.fecSymbolSize), recvDelta*int64(part.fecSymbolSize)) - // boost when low rate, and slowdown checks when high - delay := 10 + rate/2000 - if delay < 10 { - delay = 10 - } else if 
delay > 500 { - delay = 500 + loss := 1.0 + if totalDelta > 0 { + loss = 1.0 - float64(recvDelta)/float64(totalDelta) + if loss < 0 { + loss = 0 } + if loss > 1 { + loss = 1 + } + } - if batches >= 3 && lastAt+delay <= nowMs && atomic.CompareAndSwapInt64(&r.lastNetworkBatchesNum, batches, 0) { - atomic.StoreInt64(&r.lastNetworkProcessAt, nowMs) - atomic.AddInt64(&r.lastNetworkPacketsRecv, -packetsRecv) + const alpha = 0.2 + prev := math.Float64frombits(part.lossEWMA.Load()) + if prev == 0 { + prev = loss + } + ew := prev*(1-alpha) + loss*alpha + part.lossEWMA.Store(math.Float64bits(ew)) + + // (3% base + 1.5 * loss), limit 30% + k := float64(part.fecSymbolsCount) + overhead := 0.03 + 1.5*ew + if overhead < 0.01 { + overhead = 0.01 + } - ratio := float64(packetsRecv) / float64(batches*32) + if overhead > 0.30 { + overhead = 0.30 + } - tokens := r.rateLimit.GetTokensLeft() + target := uint32(k + math.Ceil(k*overhead)) - if ratio >= 0.95 { - if tokens < (rate/3)*2 { - r.rateLimit.SetRate(rate + rate/10) // +10% - } - } else if ratio < 0.35 { - r.rateLimit.SetRate(rate - rate/10) // -10% - } else if ratio < 0.75 { - r.rateLimit.SetRate(rate - rate/20) // -5% - } - } + cur := atomic.LoadUint32(&part.fastSeqnoTill) + if target > cur { + atomic.StoreUint32(&part.fastSeqnoTill, target) } + + break } - break } + atomic.StoreInt64(&part.lastConfirmAt, time.Now().UnixMilli()) default: return fmt.Errorf("unexpected message type %s", reflect.TypeOf(m).String()) @@ -573,6 +699,10 @@ func (r *RLDP) handleMessage(msg *adnl.MessageCustom) error { return nil } +func (r *RLDP) GetRateInfo() (left int64, total int64) { + return r.rateLimit.GetTokensLeft(), r.rateLimit.GetRate() +} + func (r *RLDP) recoverySender() { transfersToProcess := make([]*activeTransferPart, 0, 128) timedOut := make([]*activeTransfer, 0, 32) @@ -582,11 +712,21 @@ func (r *RLDP) recoverySender() { ticker := time.NewTicker(1 * time.Millisecond) defer ticker.Stop() + // round-robin head for fair recovery + var 
rrHead uint32 + + active := false for { select { case <-closerCtx.Done(): return + case <-r.activateRecoverySender: + active = true case <-ticker.C: + if !active { + break + } + if r.lastReport.Before(time.Now().Add(-10 * time.Second)) { r.lastReport = time.Now() r.mx.RLock() @@ -611,14 +751,18 @@ func (r *RLDP) recoverySender() { } part := transfer.getCurrentPart() - if part == nil || atomic.LoadUint32(&part.seqno) < part.feq.SymbolsCount { + if part == nil { // no parts or fast symbols not yet sent continue } - if ms-part.lastRecoverAt > part.nextRecoverDelay || - (atomic.LoadUint32(&part.lastConfirmSeqno) >= part.feq.SymbolsCount && - part.lastRecoverAt < atomic.LoadInt64(&part.lastConfirmAt)) { + if part.recoveryReady.Load() == 0 { + continue + } + + if atomic.LoadUint32(&part.seqno) < part.fastSeqnoTill || + ms-part.lastRecoverAt > part.nextRecoverDelay || + part.lastRecoverAt < atomic.LoadInt64(&part.lastConfirmAt) { transfersToProcess = append(transfersToProcess, part) } } @@ -636,56 +780,130 @@ func (r *RLDP) recoverySender() { } if len(r.activeRequests)+len(r.activeTransfers)+len(r.expectedTransfers) == 0 { - // stop ticks to not consume resources - ticker.Stop() + // stop active ticks to not consume resources + active = false } r.mx.RUnlock() - loop: - for _, part := range transfersToProcess { - part.lastRecoverAt = ms - part.nextRecoverDelay = 30 // fixed for now - - numToResend := 1 - if sc := part.feq.SymbolsCount / 100; sc > 1 { // up to 1% - numToResend = int(sc) - } - - seqno := atomic.LoadUint32(&part.seqno) - for i := 0; i < numToResend; i++ { - p := MessagePart{ - TransferID: part.transfer.id, - FecType: part.feq, - Part: part.index, - TotalSize: uint64(len(part.transfer.data)), - Seqno: seqno, - Data: part.encoder.GenSymbol(seqno), + sort.Slice(transfersToProcess, func(i, j int) bool { + // recently confirmed transfers are prioritized + return atomic.LoadInt64(&transfersToProcess[i].lastConfirmAt) > + 
atomic.LoadInt64(&transfersToProcess[j].lastConfirmAt) + }) + + isV2 := r.useV2.Load() == 1 + n := len(transfersToProcess) + if n > 0 { + start := int(rrHead % uint32(n)) + drained := false + lastServedIdx := -1 + + sendLoop: + for i := 0; i < n; i++ { + idx := (start + i) % n + part := transfersToProcess[idx] + + seqno := atomic.LoadUint32(&part.seqno) + + quantum := int64(1) + if sc := part.fecSymbolsCount / 200; sc > 1 { + quantum = int64(sc) } - seqno++ - var msgPart tl.Serializable = p - if r.useV2 { - msgPart = MessagePartV2(p) + if seqno < part.fastSeqnoTill { + fastDiff := int64(part.fastSeqnoTill - seqno) + if fastDiff > quantum { + quantum = fastDiff + } } - for { - if r.useV2 && !r.rateLimit.TryConsume() { - select { - case <-closerCtx.Done(): - return - case <-time.After(1 * time.Millisecond): - } - continue + part.drrDeficit += quantum + if part.drrDeficit <= 0 { + continue + } + + allow := part.drrDeficit + if allow > int64(math.MaxInt32) { + allow = int64(math.MaxInt32) + } + + ms = time.Now().UnixMilli() + + requested := int(allow) + consumed := r.rateLimit.ConsumePackets(requested, int(part.fecSymbolSize)) + if consumed == 0 { + drained = true + break + } + + prevSeqno := seqno + for j := 0; j < consumed; j++ { + p := MessagePart{ + TransferID: part.transfer.id, + FecType: part.fec, + Part: part.index, + TotalSize: part.transfer.totalSize, + Seqno: seqno, + Data: part.encoder.GenSymbol(seqno), + } + seqno++ + + var msgPart tl.Serializable = p + if isV2 { + msgPart = MessagePartV2(p) } - if err := r.adnl.SendCustomMessage(context.Background(), msgPart); err != nil { + part.sendClock.OnSend(p.Seqno, ms) + if err := r.adnl.SendCustomMessage(closerCtx, msgPart); err != nil { Logger("failed to send recovery message part", p.Seqno, err.Error()) - break loop + drained = true + break sendLoop + } + } + + if seqno > prevSeqno { + sent := int64(seqno - prevSeqno) + part.drrDeficit -= sent + if part.drrDeficit < 0 { + part.drrDeficit = 0 + } + + base := 
r.rateCtrl.CurrentMinRTT() + if base <= 0 { + base = r.rateCtrl.opts.DefaultRTTMs + if base <= 0 { + base = 25 + } } + + minGap := max64(8, base/4) + maxGap := max64(20, base/2) + + if consumed > 0 { + part.nextRecoverDelay = minGap + } else { + part.nextRecoverDelay = maxGap + } + + part.lastRecoverAt = ms + lastServedIdx = idx + } + atomic.StoreUint32(&part.seqno, seqno) + + if consumed < requested { + drained = true break } } - atomic.StoreUint32(&part.seqno, seqno) + + if lastServedIdx >= 0 { + rrHead = uint32((lastServedIdx + 1) % n) + } + if drained && lastServedIdx < 0 { + rrHead = uint32(start) + } + } else { + rrHead = 0 } if len(timedOut) > 0 || len(timedOutReq) > 0 || len(timedOutExp) > 0 { @@ -702,12 +920,31 @@ func (r *RLDP) recoverySender() { r.mx.Unlock() } + if len(transfersToProcess) == 0 && r.rateLimit.GetTokensLeft() > int64(DefaultSymbolSize)*20 { + r.rateCtrl.SetAppLimited(true) + } else { + r.rateCtrl.SetAppLimited(false) + } + + for i := range transfersToProcess { + transfersToProcess[i] = nil + } transfersToProcess = transfersToProcess[:0] + + for i := range timedOut { + timedOut[i] = nil + } timedOut = timedOut[:0] + + for i := range timedOutReq { + timedOutReq[i] = "" + } timedOutReq = timedOutReq[:0] + + for i := range timedOutExp { + timedOutExp[i] = "" + } timedOutExp = timedOutExp[:0] - case <-r.activateRecoverySender: - ticker.Reset(1 * time.Millisecond) } } } @@ -717,6 +954,7 @@ func (r *RLDP) startTransfer(ctx context.Context, transferId, data []byte, recov id: transferId, timeoutAt: recoverTimeoutAt * 1000, // ms data: data, + totalSize: uint64(len(data)), rldp: r, } @@ -754,39 +992,80 @@ func (t *activeTransfer) prepareNextPart() (bool, error) { return false, nil // fmt.Errorf("transfer timed out") } - partIndex := uint32(0) - if cp := t.getCurrentPart(); cp != nil { - partIndex = cp.index + 1 - } - - if len(t.data) <= int(partIndex*PartSize) { - // all parts sent + if len(t.data) == 0 { return false, nil } - payload := 
t.data[partIndex*PartSize:] + partIndex := t.nextPartIndex + + payload := t.data if len(payload) > int(PartSize) { payload = payload[:PartSize] } - enc, err := raptorq.NewRaptorQ(DefaultSymbolSize).CreateEncoder(payload) - if err != nil { - return false, fmt.Errorf("failed to create raptorq object encoder: %w", err) + if len(payload) == 0 { + return false, nil } + remaining := t.data[len(payload):] - part := activeTransferPart{ - encoder: enc, - seqno: 0, - index: partIndex, - feq: FECRaptorQ{ + cnt := uint32(len(payload))/DefaultSymbolSize + 1 + + var err error + var enc fecEncoder + var fec FEC + + //goland:noinspection GoBoolExpressions + if MultiFECMode && len(payload) < int(RoundRobinFECLimit) { + enc, err = roundrobin.NewEncoder(payload, DefaultSymbolSize) + if err != nil { + return false, fmt.Errorf("failed to create rr object encoder: %w", err) + } + + fec = FECRoundRobin{ DataSize: uint32(len(payload)), SymbolSize: DefaultSymbolSize, - SymbolsCount: enc.BaseSymbolsNum(), - }, - nextRecoverDelay: 30, + SymbolsCount: cnt, + } + } else { + enc, err = raptorq.NewRaptorQ(DefaultSymbolSize).CreateEncoder(payload) + if err != nil { + return false, fmt.Errorf("failed to create raptorq object encoder: %w", err) + } + + fec = FECRaptorQ{ + DataSize: uint32(len(payload)), + SymbolSize: DefaultSymbolSize, + SymbolsCount: cnt, + } + } + + part := activeTransferPart{ + encoder: enc, + seqno: 0, + index: partIndex, + fec: fec, + fecSymbolsCount: fec.GetSymbolsCount(), + fecSymbolSize: fec.GetSymbolSize(), + nextRecoverDelay: 15, + fastSeqnoTill: cnt + cnt/50 + 1, // +2% transfer: t, } + pt := uint32(1) << uint32(math.Ceil(math.Log2(float64(part.fecSymbolsCount)))) + if pt > 16<<10 { + pt = 16 << 10 + } else if pt < 64 { + pt = 64 + } + part.sendClock = NewSendClock(int(pt)) + + if len(remaining) == 0 { + t.data = nil + } else { + t.data = remaining + } + + t.nextPartIndex++ t.currentPart.Store(&part) return true, nil } @@ -799,44 +1078,48 @@ func (r *RLDP) 
sendFastSymbols(ctx context.Context, transfer *activeTransfer) er p := MessagePart{ TransferID: transfer.id, - FecType: part.feq, + FecType: part.fec, Part: part.index, - TotalSize: uint64(len(transfer.data)), + TotalSize: transfer.totalSize, } - smb := uint32(1) - if s := part.feq.SymbolsCount / 33; s > smb { - smb = s - } - - sc := part.feq.SymbolsCount + smb // ~3% recovery + sc := part.fastSeqnoTill - for i := uint32(0); i < sc; i++ { - p.Seqno = i - p.Data = part.encoder.GenSymbol(i) + isV2 := r.useV2.Load() == 1 + maxBatch := int(^uint(0) >> 1) + seqno := uint32(0) + for seqno < sc { + remaining := uint64(sc - seqno) + if remaining > uint64(maxBatch) { + remaining = uint64(maxBatch) + } - var msgPart tl.Serializable = p - if r.useV2 { - msgPart = MessagePartV2(p) + batch := r.rateLimit.ConsumePackets(int(remaining), int(part.fecSymbolSize)) + if batch == 0 { + break } + now := time.Now().UnixMilli() - for { - if r.useV2 && !r.rateLimit.TryConsume() { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(1 * time.Millisecond): - } - continue + for i := 0; i < batch; i++ { + currentSeqno := seqno + p.Seqno = currentSeqno + p.Data = part.encoder.GenSymbol(currentSeqno) + + var msgPart tl.Serializable = p + if isV2 { + msgPart = MessagePartV2(p) } + part.sendClock.OnSend(currentSeqno, now) if err := r.adnl.SendCustomMessage(ctx, msgPart); err != nil { - return fmt.Errorf("failed to send message part %d: %w", i, err) + return fmt.Errorf("failed to send message part %d: %w", currentSeqno, err) } - break + + seqno++ } } - atomic.StoreUint32(&part.seqno, sc) + atomic.StoreUint32(&part.seqno, seqno) + part.recoveryReady.Store(1) select { case r.activateRecoverySender <- true: @@ -874,15 +1157,16 @@ type AsyncQueryResult struct { } func (r *RLDP) DoQueryAsync(ctx context.Context, maxAnswerSize uint64, id []byte, query tl.Serializable, result chan<- AsyncQueryResult) error { - timeout, ok := ctx.Deadline() - if !ok { - timeout = time.Now().Add(15 * 
time.Second) - } - if len(id) != 32 { return errors.New("invalid id") } + now := time.Now() + timeout, ok := ctx.Deadline() + if !ok { + timeout = now.Add(15 * time.Second) + } + q := &Query{ ID: id, MaxAnswerSize: maxAnswerSize, @@ -890,6 +1174,11 @@ func (r *RLDP) DoQueryAsync(ctx context.Context, maxAnswerSize uint64, id []byte Data: query, } + if uxMin := now.Unix() + 2; int64(q.Timeout) < uxMin { + // because timeout in seconds, we should add some to avoid an early drop + q.Timeout = uint32(uxMin) + } + data, err := tl.Serialize(q, true) if err != nil { return fmt.Errorf("failed to serialize query: %w", err) @@ -947,6 +1236,11 @@ func (r *RLDP) SendAnswer(ctx context.Context, maxAnswerSize uint64, timeoutAt u tm = int64(timeoutAt) } + if minT := time.Now().Unix() + 1; tm < minT { + // give at least 1 sec in case of a clock problem + tm = minT + } + if err = r.startTransfer(ctx, reverseTransferId(toTransferId), data, tm); err != nil { return fmt.Errorf("failed to send partitioned answer: %w", err) } diff --git a/adnl/rldp/client_test.go b/adnl/rldp/client_test.go index b7fbb26e..f0c7bcc4 100644 --- a/adnl/rldp/client_test.go +++ b/adnl/rldp/client_test.go @@ -6,15 +6,17 @@ import ( "crypto/ed25519" "crypto/rand" "crypto/sha256" - "encoding/hex" "errors" + "fmt" "github.com/xssnick/raptorq" "github.com/xssnick/tonutils-go/adnl" "github.com/xssnick/tonutils-go/tl" "log" + "net" "net/http" "net/url" "reflect" + "runtime" "strings" "testing" "time" @@ -24,6 +26,8 @@ func init() { tl.Register(testRequest{}, "http.request id:int256 method:string url:string http_version:string headers:(vector http.header) = http.Response") tl.Register(testResponse{}, "http.response http_version:string status_code:int reason:string headers:(vector http.header) no_payload:Bool = http.Response") tl.Register(testHeader{}, "") + tl.Register(benchRequest{}, "benchRequest") + tl.Register(benchResponse{}, "benchResponse") } type MockADNL struct { @@ -63,12 +67,21 @@ func (m MockADNL) 
SendCustomMessage(ctx context.Context, req tl.Serializable) er func (m MockADNL) Close() { } +type benchRequest struct { + WantLen uint32 `tl:"int"` +} + +type benchResponse struct { + Data []byte `tl:"bytes"` +} + type testRequest struct { ID []byte `tl:"int256"` Method string `tl:"string"` URL string `tl:"string"` Version string `tl:"string"` Headers []testHeader `tl:"vector struct"` + RespSz uint64 `tl:"long"` } type testResponse struct { @@ -665,7 +678,7 @@ func TestRLDP_ClientServer(t *testing.T) { res := testResponse{ Version: "HTTP/1.1", StatusCode: int32(200), - Reason: "test ok:" + hex.EncodeToString(q.ID) + q.URL, + Reason: q.URL, Headers: []testHeader{{"test", "test"}}, NoPayload: true, } @@ -710,12 +723,13 @@ func TestRLDP_ClientServer(t *testing.T) { t.Fatal("bad client execution, err: ", err) } - if resp.Reason != "test ok:"+hex.EncodeToString(make([]byte, 32))+u { + if resp.Reason != u { t.Fatal("bad response data") } }) Logger = log.Println + t.Run("big multipart 10mb", func(t *testing.T) { old := MaxUnexpectedTransferSize MaxUnexpectedTransferSize = 1 << 30 @@ -736,9 +750,226 @@ func TestRLDP_ClientServer(t *testing.T) { t.Fatal("bad client execution, err: ", err) } - if resp.Reason != "test ok:"+hex.EncodeToString(make([]byte, 32))+u { + if resp.Reason != u { + t.Fatal("bad response data") + } + }) + + t.Run("big multipart 4mb rr", func(t *testing.T) { + old := MaxUnexpectedTransferSize + MaxUnexpectedTransferSize = 1 << 30 + MultiFECMode = true + defer func() { + MaxUnexpectedTransferSize = old + MultiFECMode = false + RoundRobinFECLimit = 1 << 30 + }() + + u := strings.Repeat("a", 4*1024*1024) + + var resp testResponse + err := cr.DoQuery(context.Background(), 4096+uint64(len(u)), testRequest{ + ID: make([]byte, 32), + Method: "GET", + URL: u, + Version: "1", + }, &resp) + if err != nil { + t.Fatal("bad client execution, err: ", err) + } + + if resp.Reason != u { t.Fatal("bad response data") } }) } + +func BenchmarkRLDP_ClientServer(b 
*testing.B) { + old := MaxUnexpectedTransferSize + MaxUnexpectedTransferSize = 1 << 30 + defer func() { + MaxUnexpectedTransferSize = old + }() + + defaultSizes := []uint32{16 << 10, 256 << 10, 1 << 20, 4 << 20, 10 << 20} + + scenarios := []struct { + name string + sizes []uint32 + setup func(*testing.B) (*RLDP, func()) + withParallel bool + }{ + { + name: "loopback_rr", + sizes: defaultSizes, + setup: func(b *testing.B) (*RLDP, func()) { + oldLim := RoundRobinFECLimit + RoundRobinFECLimit = 2 << 30 + MultiFECMode = true + rl, end := setupLoopbackBenchmark(b) + + return rl, func() { + end() + RoundRobinFECLimit = oldLim + } + }, + withParallel: true, + }, + { + name: "loopback_raptorq", + sizes: defaultSizes, + setup: setupLoopbackBenchmark, + withParallel: true, + }, + { + // it requires some time to speedup by bbr, so will show a low rate + name: "netem_loss_raptorq", + sizes: []uint32{4 << 20}, + setup: func(tb *testing.B) (*RLDP, func()) { + return setupNetemBenchmark(tb, 0.02, 50*time.Millisecond, 5*time.Millisecond) + }, + withParallel: true, + }, + } + + for _, sc := range scenarios { + sc := sc + b.Run(sc.name, func(b *testing.B) { + client, cleanup := sc.setup(b) + defer cleanup() + runRLDPBenchSizes(b, client, sc.sizes, sc.withParallel) + }) + } +} + +func runRLDPBenchSizes(b *testing.B, client *RLDP, sizes []uint32, withParallel bool) { + for _, sz := range sizes { + b.Run(fmt.Sprintf("resp=%dKB", sz>>10), func(b *testing.B) { + b.SetBytes(int64(sz)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + var resp benchResponse + if err := client.DoQuery(context.Background(), 1<<30, benchRequest{ + WantLen: sz, + }, &resp); err != nil { + b.Fatalf("client exec err: %v", err) + } + } + }) + + if withParallel { + b.Run(fmt.Sprintf("resp=%dKB/parallel", sz>>10), func(b *testing.B) { + b.SetBytes(int64(sz)) + b.SetParallelism(runtime.NumCPU()) + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var resp benchResponse + if err := 
client.DoQuery(context.Background(), 1<<30, benchRequest{ + WantLen: sz, + }, &resp); err != nil { + b.Fatalf("client exec err: %v", err) + } + } + }) + }) + } + } +} + +func configureBenchServer(g *adnl.Gateway) { + g.SetConnectionHandler(func(client adnl.Peer) error { + conn := NewClientV2(client) + conn.SetOnQuery(func(transferId []byte, query *Query) error { + q := query.Data.(benchRequest) + res := benchResponse{Data: make([]byte, q.WantLen)} + return conn.SendAnswer(context.Background(), query.MaxAnswerSize, query.Timeout, query.ID, transferId, res) + }) + return nil + }) +} + +func setupLoopbackBenchmark(b *testing.B) (*RLDP, func()) { + srvPub, srvKey, err := ed25519.GenerateKey(nil) + if err != nil { + b.Fatal(err) + } + + _, cliKey, err := ed25519.GenerateKey(nil) + if err != nil { + b.Fatal(err) + } + + srv := adnl.NewGateway(srvKey) + if err := srv.StartServer("127.0.0.1:19157"); err != nil { + b.Fatal(err) + } + configureBenchServer(srv) + + cliGateway := adnl.NewGateway(cliKey) + if err := cliGateway.StartClient(); err != nil { + b.Fatal(err) + } + + cli, err := cliGateway.RegisterClient("127.0.0.1:19157", srvPub) + if err != nil { + b.Fatal(err) + } + + client := NewClientV2(cli) + + cleanup := func() { + client.Close() + _ = cliGateway.Close() + _ = srv.Close() + } + + return client, cleanup +} + +func setupNetemBenchmark(b *testing.B, loss float64, baseDelay, jitter time.Duration) (*RLDP, func()) { + srvPub, srvKey, err := ed25519.GenerateKey(nil) + if err != nil { + b.Fatal(err) + } + + _, cliKey, err := ed25519.GenerateKey(nil) + if err != nil { + b.Fatal(err) + } + + srvConn, cliConn := newMemPacketConnPair(loss, baseDelay, jitter, 512<<10) + + srv := adnl.NewGatewayWithNetManager(srvKey, adnl.NewSingleNetReader(func(string) (net.PacketConn, error) { + return srvConn, nil + })) + if err := srv.StartServer("127.0.0.1:19158"); err != nil { + b.Fatal(err) + } + configureBenchServer(srv) + + cliGateway := adnl.NewGatewayWithNetManager(cliKey, 
adnl.NewSingleNetReader(func(string) (net.PacketConn, error) { + return cliConn, nil + })) + if err := cliGateway.StartClient(); err != nil { + b.Fatal(err) + } + + cli, err := cliGateway.RegisterClient("127.0.0.1:19158", srvPub) + if err != nil { + b.Fatal(err) + } + + client := NewClientV2(cli) + + cleanup := func() { + client.Close() + _ = cliGateway.Close() + _ = srv.Close() + } + + return client, cleanup +} diff --git a/adnl/rldp/fec.go b/adnl/rldp/fec.go index e6ea8313..4f7ddd3e 100644 --- a/adnl/rldp/fec.go +++ b/adnl/rldp/fec.go @@ -13,12 +13,30 @@ func init() { tl.Register(FECOnline{}, "fec.online data_size:int symbol_size:int symbols_count:int = fec.Type") } +type FEC interface { + GetDataSize() uint32 + GetSymbolSize() uint32 + GetSymbolsCount() uint32 +} + type FECRaptorQ struct { DataSize uint32 // `tl:"int"` SymbolSize uint32 // `tl:"int"` SymbolsCount uint32 // `tl:"int"` } +func (f FECRaptorQ) GetDataSize() uint32 { + return f.DataSize +} + +func (f FECRaptorQ) GetSymbolSize() uint32 { + return f.SymbolSize +} + +func (f FECRaptorQ) GetSymbolsCount() uint32 { + return f.SymbolsCount +} + func (f *FECRaptorQ) Parse(data []byte) ([]byte, error) { if len(data) < 12 { return nil, fmt.Errorf("fec raptor data too short") @@ -44,6 +62,37 @@ type FECRoundRobin struct { SymbolsCount uint32 `tl:"int"` } +func (f FECRoundRobin) GetDataSize() uint32 { + return f.DataSize +} + +func (f FECRoundRobin) GetSymbolSize() uint32 { + return f.SymbolSize +} + +func (f FECRoundRobin) GetSymbolsCount() uint32 { + return f.SymbolsCount +} + +func (f *FECRoundRobin) Parse(data []byte) ([]byte, error) { + if len(data) < 12 { + return nil, fmt.Errorf("fec rr data too short") + } + f.DataSize = binary.LittleEndian.Uint32(data[:4]) + f.SymbolSize = binary.LittleEndian.Uint32(data[4:8]) + f.SymbolsCount = binary.LittleEndian.Uint32(data[8:12]) + return data[12:], nil +} + +func (f *FECRoundRobin) Serialize(buf *bytes.Buffer) error { + tmp := make([]byte, 12) + 
binary.LittleEndian.PutUint32(tmp[0:4], f.DataSize) + binary.LittleEndian.PutUint32(tmp[4:8], f.SymbolSize) + binary.LittleEndian.PutUint32(tmp[8:12], f.SymbolsCount) + buf.Write(tmp) + return nil +} + type FECOnline struct { DataSize uint32 `tl:"int"` SymbolSize uint32 `tl:"int"` diff --git a/adnl/rldp/netem_test.go b/adnl/rldp/netem_test.go new file mode 100644 index 00000000..9d7735ff --- /dev/null +++ b/adnl/rldp/netem_test.go @@ -0,0 +1,333 @@ +package rldp + +import ( + "container/heap" + "math/rand" + "net" + "sync" + "sync/atomic" + "time" +) + +type timeoutError struct{} + +func (timeoutError) Error() string { return "i/o timeout" } +func (timeoutError) Timeout() bool { return true } +func (timeoutError) Temporary() bool { return true } + +var errTimeout timeoutError + +type memPacket struct { + data []byte + addr net.Addr +} + +type scheduledPacket struct { + when time.Time + pkt memPacket +} + +type packetQueue []scheduledPacket + +func (pq packetQueue) Len() int { return len(pq) } + +func (pq packetQueue) Less(i, j int) bool { return pq[i].when.Before(pq[j].when) } + +func (pq packetQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } + +func (pq *packetQueue) Push(x interface{}) { + *pq = append(*pq, x.(scheduledPacket)) +} + +func (pq *packetQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + *pq = old[:n-1] + return item +} + +type memPacketConn struct { + name string + inbox chan memPacket + peer *memPacketConn + closeOnce sync.Once + closed atomic.Bool + closeCh chan struct{} + + baseDelay time.Duration + jitter time.Duration + loss float64 + + rngMu sync.Mutex + rng *rand.Rand + + readDeadline atomic.Int64 + writeDeadline atomic.Int64 + + localAddr *net.UDPAddr + + dispatcherOnce sync.Once + wakeCh chan struct{} + queueMu sync.Mutex + queue packetQueue +} + +func newMemPacketConnPair(loss float64, baseDelay, jitter time.Duration, buf int) (*memPacketConn, *memPacketConn) { + if buf <= 0 { + buf = 1024 + } + now := 
time.Now().UnixNano() + basePort := 20000 + int(now%10000) + a := &memPacketConn{ + name: "server", + inbox: make(chan memPacket, buf), + closeCh: make(chan struct{}), + baseDelay: baseDelay, + jitter: jitter, + loss: loss, + rng: rand.New(rand.NewSource(now)), + localAddr: &net.UDPAddr{IP: net.IPv4(10, 0, 0, 1), Port: basePort}, + wakeCh: make(chan struct{}, 1), + } + b := &memPacketConn{ + name: "client", + inbox: make(chan memPacket, buf), + closeCh: make(chan struct{}), + baseDelay: baseDelay, + jitter: jitter, + loss: loss, + rng: rand.New(rand.NewSource(now + 1)), + localAddr: &net.UDPAddr{IP: net.IPv4(10, 0, 0, 2), Port: basePort + 1}, + wakeCh: make(chan struct{}, 1), + } + a.peer = b + b.peer = a + a.startDispatcher() + b.startDispatcher() + return a, b +} + +func (c *memPacketConn) randFloat() float64 { + c.rngMu.Lock() + f := c.rng.Float64() + c.rngMu.Unlock() + return f +} + +func (c *memPacketConn) LocalAddr() net.Addr { + return c.localAddr +} + +func (c *memPacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if c.closed.Load() { + return 0, nil, net.ErrClosed + } + var ( + timer *time.Timer + timerCh <-chan time.Time + ) + if deadline := c.readDeadline.Load(); deadline > 0 { + d := time.Until(time.Unix(0, deadline)) + if d <= 0 { + return 0, nil, errTimeout + } + timer = time.NewTimer(d) + timerCh = timer.C + defer timer.Stop() + } + select { + case pkt, ok := <-c.inbox: + if !ok { + return 0, nil, net.ErrClosed + } + n := copy(b, pkt.data) + return n, pkt.addr, nil + case <-c.closeCh: + return 0, nil, net.ErrClosed + case <-timerCh: + return 0, nil, errTimeout + } +} + +func (c *memPacketConn) WriteTo(b []byte, _ net.Addr) (int, error) { + if c.closed.Load() { + return 0, net.ErrClosed + } + peer := c.peer + if peer == nil || peer.closed.Load() { + return 0, net.ErrClosed + } + if deadline := c.writeDeadline.Load(); deadline > 0 && time.Now().After(time.Unix(0, deadline)) { + return 0, errTimeout + } + if len(b) == 0 { + return 0, nil + } + if 
c.loss > 0 && c.randFloat() < c.loss { + return len(b), nil + } + payload := make([]byte, len(b)) + copy(payload, b) + delay := c.baseDelay + if c.jitter > 0 { + j := time.Duration((c.randFloat()*2 - 1) * float64(c.jitter)) + delay += j + if delay < 0 { + delay = 0 + } + } + peer.startDispatcher() + pkt := memPacket{data: payload, addr: c.localAddr} + peer.enqueuePacket(pkt, time.Now().Add(delay)) + return len(b), nil +} + +func (c *memPacketConn) Close() error { + c.closeOnce.Do(func() { + c.closed.Store(true) + close(c.closeCh) + close(c.inbox) + c.wake() + }) + return nil +} + +func (c *memPacketConn) SetDeadline(t time.Time) error { + if err := c.SetReadDeadline(t); err != nil { + return err + } + return c.SetWriteDeadline(t) +} + +func (c *memPacketConn) SetReadDeadline(t time.Time) error { + if t.IsZero() { + c.readDeadline.Store(0) + } else { + c.readDeadline.Store(t.UnixNano()) + } + return nil +} + +func (c *memPacketConn) SetWriteDeadline(t time.Time) error { + if t.IsZero() { + c.writeDeadline.Store(0) + } else { + c.writeDeadline.Store(t.UnixNano()) + } + return nil +} + +func (c *memPacketConn) SetReadBuffer(int) error { return nil } +func (c *memPacketConn) SetWriteBuffer(int) error { return nil } + +func (c *memPacketConn) startDispatcher() { + c.dispatcherOnce.Do(func() { + if c.wakeCh == nil { + c.wakeCh = make(chan struct{}, 1) + } + heap.Init(&c.queue) + go c.dispatchLoop() + }) +} + +func (c *memPacketConn) enqueuePacket(pkt memPacket, when time.Time) { + if c.closed.Load() { + return + } + c.queueMu.Lock() + earliest := c.queue.Len() == 0 + if !earliest { + earliest = when.Before(c.queue[0].when) + } + heap.Push(&c.queue, scheduledPacket{when: when, pkt: pkt}) + c.queueMu.Unlock() + if earliest { + c.wake() + } +} + +func (c *memPacketConn) dispatchLoop() { + timer := time.NewTimer(time.Hour) + if !timer.Stop() { + <-timer.C + } + for { + c.queueMu.Lock() + if c.queue.Len() == 0 { + closed := c.closed.Load() + c.queueMu.Unlock() + if closed { + 
return + } + select { + case <-c.closeCh: + return + case <-c.wakeCh: + continue + } + } + next := c.queue[0] + wait := time.Until(next.when) + if wait <= 0 { + heap.Pop(&c.queue) + c.queueMu.Unlock() + c.deliver(next.pkt) + continue + } + c.queueMu.Unlock() + + resetTimer(timer, wait) + select { + case <-timer.C: + continue + case <-c.closeCh: + if !timer.Stop() { + select { + case <-timer.C: + default: + } + } + return + case <-c.wakeCh: + if !timer.Stop() { + select { + case <-timer.C: + default: + } + } + continue + } + } +} + +func (c *memPacketConn) deliver(pkt memPacket) { + if c.closed.Load() { + return + } + select { + case c.inbox <- pkt: + case <-c.closeCh: + default: + } +} + +func (c *memPacketConn) wake() { + if c.wakeCh == nil { + return + } + select { + case c.wakeCh <- struct{}{}: + default: + } +} + +func resetTimer(t *time.Timer, d time.Duration) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + t.Reset(d) +} diff --git a/adnl/rldp/queue.go b/adnl/rldp/queue.go new file mode 100644 index 00000000..fc7bba83 --- /dev/null +++ b/adnl/rldp/queue.go @@ -0,0 +1,47 @@ +package rldp + +type Queue struct { + ch chan *MessagePart +} + +func NewQueue(sz int) *Queue { + return &Queue{ + ch: make(chan *MessagePart, sz), + } +} + +func (q *Queue) Enqueue(m *MessagePart) { + for { + select { + case q.ch <- m: + // written + return + default: + } + + select { + case <-q.ch: + // not written, drop oldest + default: + } + } +} + +func (q *Queue) Dequeue() (*MessagePart, bool) { + select { + case m := <-q.ch: + return m, true + default: + return nil, false + } +} + +func (q *Queue) Drain() { + for { + select { + case <-q.ch: + default: + return + } + } +} diff --git a/adnl/rldp/queue_test.go b/adnl/rldp/queue_test.go new file mode 100644 index 00000000..f9c89ccf --- /dev/null +++ b/adnl/rldp/queue_test.go @@ -0,0 +1,203 @@ +package rldp + +import ( + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +func mp(i uint32) *MessagePart { return 
&MessagePart{Seqno: i} } + +func TestQueue_Basic(t *testing.T) { + q := NewQueue(4) + + if _, ok := q.Dequeue(); ok { + t.Fatalf("queue must be empty initially") + } + + // put 3 + for i := uint32(0); i < 3; i++ { + q.Enqueue(mp(i)) + } + + // get 3 in order + for i := uint32(0); i < 3; i++ { + m, ok := q.Dequeue() + if !ok { + t.Fatalf("expected ok for i=%d", i) + } + + if m.Seqno != i { + t.Fatalf("want=%d got=%d", i, m.Seqno) + } + } + + // empty again + if _, ok := q.Dequeue(); ok { + t.Fatalf("queue must be empty") + } +} + +func TestQueue_OverwriteOldest(t *testing.T) { + q := NewQueue(4) + + for i := uint32(0); i < 6; i++ { + q.Enqueue(mp(i)) + } + + want := []uint32{2, 3, 4, 5} + for i, w := range want { + m, ok := q.Dequeue() + if !ok { + t.Fatalf("expected ok at i=%d", i) + } + if m == nil || m.Seqno != w { + t.Fatalf("want=%d got=%v at i=%d", w, m, i) + } + } + // пусто + if _, ok := q.Dequeue(); ok { + t.Fatalf("queue must be empty") + } +} + +func TestQueue_OverwriteInterleaved(t *testing.T) { + q := NewQueue(2) + + q.Enqueue(mp(0)) + q.Enqueue(mp(1)) + q.Enqueue(mp(2)) + q.Enqueue(mp(3)) + + // ожидаем 2,3 + m, ok := q.Dequeue() + if !ok || m.Seqno != 2 { + t.Fatalf("want=2 got=%v", m) + } + m, ok = q.Dequeue() + if !ok || m.Seqno != 3 { + t.Fatalf("want=3 got=%v", m) + } +} + +func TestQueue_DequeueEmpty(t *testing.T) { + q := NewQueue(1) + if m, ok := q.Dequeue(); ok || m != nil { + t.Fatalf("should be empty") + } +} + +func TestQueue_Concurrent_Stress_NoNilOnOk(t *testing.T) { + const ( + capacity = 256 + producers = 8 + perProducer = 50_000 + ) + + q := NewQueue(capacity) + + var wg sync.WaitGroup + wg.Add(producers) + + // consumer + var okCnt, nilOnOk int64 + done := make(chan struct{}) + go func() { + defer close(done) + stopAt := time.Now().Add(3 * time.Second) + for time.Now().Before(stopAt) { + if m, ok := q.Dequeue(); ok { + atomic.AddInt64(&okCnt, 1) + if m == nil { + atomic.AddInt64(&nilOnOk, 1) + } + } else { + runtime.Gosched() + } + } 
+ }() + + // producers + for p := 0; p < producers; p++ { + pid := uint32(p) + go func(pid uint32) { + defer wg.Done() + for i := uint32(0); i < perProducer; i++ { + q.Enqueue(&MessagePart{Part: pid, Seqno: i}) + if (i & 1023) == 0 { + runtime.Gosched() + } + } + }(pid) + } + + wg.Wait() + <-done + + if n := atomic.LoadInt64(&nilOnOk); n != 0 { + t.Fatalf("got %d cases of (nil, ok=true) — must never happen", n) + } + if atomic.LoadInt64(&okCnt) == 0 { + t.Fatalf("consumer should have received some items") + } +} + +func TestQueue_NoDeadlockOnFull(t *testing.T) { + q := NewQueue(4) + + for i := uint32(0); i < 4; i++ { + q.Enqueue(mp(i)) + } + + stop := make(chan struct{}) + go func() { + defer close(stop) + for i := uint32(4); i < 20_000; i++ { + q.Enqueue(mp(i)) + if (i & 4095) == 0 { + runtime.Gosched() + } + } + }() + + timeout := time.After(500 * time.Millisecond) + read := 0 +loop: + for { + select { + case <-timeout: + break loop + default: + if _, ok := q.Dequeue(); ok { + read++ + } else { + runtime.Gosched() + } + } + } + + <-stop + if read == 0 { + t.Fatalf("expected to read something without deadlock") + } +} + +func TestQueue_OrderAfterOverwriteWindow(t *testing.T) { + q := NewQueue(8) + + for i := uint32(0); i < 20; i++ { + q.Enqueue(mp(i)) + } + + for want := uint32(12); want < 20; want++ { + m, ok := q.Dequeue() + if !ok || m == nil || m.Seqno != want { + t.Fatalf("want=%d got=%v (ok=%v)", want, m, ok) + } + } + if _, ok := q.Dequeue(); ok { + t.Fatalf("queue must be empty") + } +} diff --git a/adnl/rldp/rldp.go b/adnl/rldp/rldp.go index 9f6ba109..257035e8 100644 --- a/adnl/rldp/rldp.go +++ b/adnl/rldp/rldp.go @@ -63,7 +63,7 @@ type CompleteV2 struct { type MessagePart struct { TransferID []byte // `tl:"int256"` - FecType any // `tl:"struct boxed [fec.roundRobin,fec.raptorQ,fec.online]"` + FecType FEC // `tl:"struct boxed [fec.roundRobin,fec.raptorQ,fec.online]"` Part uint32 // `tl:"int"` TotalSize uint64 // `tl:"long"` Seqno uint32 // `tl:"int"` @@ 
-78,12 +78,17 @@ func (m *MessagePart) Parse(data []byte) ([]byte, error) { transfer := make([]byte, 32) copy(transfer, data) - var fec FECRaptorQ - data, err := tl.Parse(&fec, data[32:], true) + var fecAny any + data, err := tl.Parse(&fecAny, data[32:], true) if err != nil { return nil, err } + fec, ok := fecAny.(FEC) + if !ok { + return nil, errors.New("invalid fec type") + } + if len(data) < 20 { return nil, errors.New("message part is too short") } @@ -110,37 +115,110 @@ func (m *MessagePart) Parse(data []byte) ([]byte, error) { func (m *MessagePart) Serialize(buf *bytes.Buffer) error { switch m.FecType.(type) { case FECRaptorQ: - if len(m.TransferID) == 0 { - buf.Write(make([]byte, 32)) - } else if len(m.TransferID) != 32 { - return errors.New("invalid transfer id") - } else { - buf.Write(m.TransferID) - } - - _, err := tl.Serialize(m.FecType, true, buf) - if err != nil { - return err - } + case FECRoundRobin: default: return errors.New("invalid fec type") } + if len(m.TransferID) == 0 { + buf.Write(make([]byte, 32)) + } else if len(m.TransferID) != 32 { + return errors.New("invalid transfer id") + } else { + buf.Write(m.TransferID) + } + + _, err := tl.Serialize(m.FecType, true, buf) + if err != nil { + return err + } + tmp := make([]byte, 16) binary.LittleEndian.PutUint32(tmp, m.Part) binary.LittleEndian.PutUint64(tmp[4:], m.TotalSize) binary.LittleEndian.PutUint32(tmp[12:], m.Seqno) buf.Write(tmp) - tl.ToBytesToBuffer(buf, m.Data) - return nil + return tl.ToBytesToBuffer(buf, m.Data) } type MessagePartV2 struct { TransferID []byte `tl:"int256"` - FecType any `tl:"struct boxed [fec.roundRobin,fec.raptorQ,fec.online]"` + FecType FEC `tl:"struct boxed [fec.roundRobin,fec.raptorQ,fec.online]"` Part uint32 `tl:"int"` TotalSize uint64 `tl:"long"` Seqno uint32 `tl:"int"` Data []byte `tl:"bytes"` } + +func (m *MessagePartV2) Parse(data []byte) ([]byte, error) { + if len(data) < 56 { + return nil, errors.New("message part is too short") + } + + transfer := 
make([]byte, 32) + copy(transfer, data) + + var fecAny any + data, err := tl.Parse(&fecAny, data[32:], true) + if err != nil { + return nil, err + } + + fec, ok := fecAny.(FEC) + if !ok { + return nil, errors.New("invalid fec type") + } + + if len(data) < 20 { + return nil, errors.New("message part is too short") + } + + part := binary.LittleEndian.Uint32(data) + size := binary.LittleEndian.Uint64(data[4:]) + seq := binary.LittleEndian.Uint32(data[12:]) + + slc, data, err := tl.FromBytes(data[16:]) + if err != nil { + return nil, fmt.Errorf("tl.FromBytes: %v", err) + } + + m.TransferID = transfer + m.FecType = fec + m.Part = part + m.TotalSize = size + m.Seqno = seq + m.Data = slc + + return data, nil +} + +func (m *MessagePartV2) Serialize(buf *bytes.Buffer) error { + switch m.FecType.(type) { + case FECRaptorQ: + case FECRoundRobin: + default: + return errors.New("invalid fec type") + } + + if len(m.TransferID) == 0 { + buf.Write(make([]byte, 32)) + } else if len(m.TransferID) != 32 { + return errors.New("invalid transfer id") + } else { + buf.Write(m.TransferID) + } + + _, err := tl.Serialize(m.FecType, true, buf) + if err != nil { + return err + } + + tmp := make([]byte, 16) + binary.LittleEndian.PutUint32(tmp, m.Part) + binary.LittleEndian.PutUint64(tmp[4:], m.TotalSize) + binary.LittleEndian.PutUint32(tmp[12:], m.Seqno) + buf.Write(tmp) + + return tl.ToBytesToBuffer(buf, m.Data) +} diff --git a/adnl/rldp/roundrobin/coder.go b/adnl/rldp/roundrobin/coder.go new file mode 100644 index 00000000..c9810efe --- /dev/null +++ b/adnl/rldp/roundrobin/coder.go @@ -0,0 +1,99 @@ +package roundrobin + +import "errors" + +type Encoder struct { + data []byte + symbolSize uint32 + symbolsCount uint32 +} + +type Decoder struct { + data []byte + mask []bool + left uint32 + symbolSize uint32 + symbolsCount uint32 +} + +func NewEncoder(data []byte, maxSymbolSize uint32) (*Encoder, error) { + syms := (len(data) + int(maxSymbolSize) - 1) / int(maxSymbolSize) + if syms == 0 { + 
return nil, errors.New("data must be non-empty") + } + + return &Encoder{ + data: data, + symbolSize: maxSymbolSize, + symbolsCount: uint32(syms), + }, nil +} + +func (e *Encoder) GenSymbol(id uint32) []byte { + if e.symbolsCount == 0 { + return nil + } + pos := id % e.symbolsCount + offset := pos * e.symbolSize + end := offset + e.symbolSize + + out := make([]byte, e.symbolSize) + if int(offset) < len(e.data) { + if int(end) > len(e.data) { + end = uint32(len(e.data)) + } + copy(out, e.data[offset:end]) + } + return out +} + +func NewDecoder(symbolSize uint32, dataSize uint32) (*Decoder, error) { + syms := (dataSize + symbolSize - 1) / symbolSize + if syms == 0 { + return nil, errors.New("dataSize must be > 0") + } + + return &Decoder{ + data: make([]byte, dataSize), + mask: make([]bool, syms), + left: syms, + symbolSize: symbolSize, + symbolsCount: syms, + }, nil +} + +func (d *Decoder) AddSymbol(id uint32, sym []byte) (bool, error) { + if uint32(len(sym)) != d.symbolSize { + return false, errors.New("invalid symbol length") + } + + if d.symbolsCount == 0 { + return false, errors.New("decoder not initialized") + } + + pos := id % d.symbolsCount + idx := int(pos) + if d.mask[idx] { + return d.left == 0, nil + } + + offset := idx * int(d.symbolSize) + end := offset + int(d.symbolSize) + if offset < len(d.data) { + if end > len(d.data) { + end = len(d.data) + } + copy(d.data[offset:end], sym[:end-offset]) + } + + d.mask[idx] = true + d.left-- + return d.left == 0, nil +} + +func (d *Decoder) Decode() (bool, []byte, error) { + if d.left != 0 { + return false, nil, errors.New("not ready") + } + return true, d.data, nil +} diff --git a/adnl/rldp/roundrobin/coder_test.go b/adnl/rldp/roundrobin/coder_test.go new file mode 100644 index 00000000..496238b5 --- /dev/null +++ b/adnl/rldp/roundrobin/coder_test.go @@ -0,0 +1,222 @@ +package roundrobin + +import ( + "crypto/rand" + mrand "math/rand" + "testing" + "time" +) + +func genRandomBytes(t *testing.T, n int) []byte { + 
t.Helper() + if n == 0 { + return nil + } + b := make([]byte, n) + if _, err := rand.Read(b); err != nil { + t.Fatalf("rand.Read: %v", err) + } + return b +} + +func TestNewEncoder_EmptyData(t *testing.T) { + _, err := NewEncoder(nil, 256) + if err == nil { + t.Fatalf("expected error for empty data, got nil") + } +} + +func TestNewDecoder_BadParams(t *testing.T) { + _, err := NewDecoder(256, 0) + if err == nil { + t.Fatalf("expected error for dataSize=0, got nil") + } +} + +func TestRoundTrip_ExactMultiple(t *testing.T) { + const symbolSize = 256 + const dataSize = 4096 + data := genRandomBytes(t, dataSize) + + enc, err := NewEncoder(data, symbolSize) + if err != nil { + t.Fatalf("NewEncoder: %v", err) + } + dec, err := NewDecoder(symbolSize, uint32(len(data))) + if err != nil { + t.Fatalf("NewDecoder: %v", err) + } + + for i := uint32(0); i < enc.symbolsCount; i++ { + s := enc.GenSymbol(i) + if uint32(len(s)) != symbolSize { + t.Fatalf("symbol len mismatch: got %d, want %d", len(s), symbolSize) + } + } + + ids := make([]uint32, enc.symbolsCount) + for i := range ids { + ids[i] = uint32(i) + } + r := mrand.New(mrand.NewSource(1)) + r.Shuffle(len(ids), func(i, j int) { ids[i], ids[j] = ids[j], ids[i] }) + + complete := false + for idx, id := range ids { + sym := enc.GenSymbol(id) + done, err := dec.AddSymbol(id, sym) + if err != nil { + t.Fatalf("AddSymbol #%d (id=%d): %v", idx, id, err) + } + if done && idx != len(ids)-1 { + t.Fatalf("completed too early at #%d", idx) + } + complete = done + } + if !complete { + t.Fatalf("not completed after feeding all symbols") + } + + ready, out, err := dec.Decode() + if err != nil { + t.Fatalf("Decode error: %v", err) + } + if !ready { + t.Fatalf("Decode not ready") + } + if len(out) != len(data) { + t.Fatalf("decoded length mismatch: got %d, want %d", len(out), len(data)) + } + if string(out) != string(data) { + t.Fatalf("decoded data mismatch") + } +} + +func TestRoundTrip_WithTailRemainder(t *testing.T) { + const symbolSize 
= 256 + const dataSize = 1000 + data := genRandomBytes(t, dataSize) + + enc, err := NewEncoder(data, symbolSize) + if err != nil { + t.Fatalf("NewEncoder: %v", err) + } + + lastID := enc.symbolsCount - 1 + last := enc.GenSymbol(lastID) + if len(last) != symbolSize { + t.Fatalf("last symbol len: got %d, want %d", len(last), symbolSize) + } + + dec, err := NewDecoder(symbolSize, uint32(len(data))) + if err != nil { + t.Fatalf("NewDecoder: %v", err) + } + + ids := make([]uint32, enc.symbolsCount) + for i := range ids { + ids[i] = uint32(i) + } + r := mrand.New(mrand.NewSource(time.Now().UnixNano())) + r.Shuffle(len(ids), func(i, j int) { ids[i], ids[j] = ids[j], ids[i] }) + + for _, id := range ids { + sym := enc.GenSymbol(id) + if _, err := dec.AddSymbol(id, sym); err != nil { + t.Fatalf("AddSymbol id=%d: %v", id, err) + } + if _, err := dec.AddSymbol(id, sym); err != nil { + t.Fatalf("AddSymbol duplicate id=%d: %v", id, err) + } + } + + ready, out, err := dec.Decode() + if err != nil { + t.Fatalf("Decode error: %v", err) + } + if !ready { + t.Fatalf("Decode not ready") + } + if len(out) != len(data) { + t.Fatalf("decoded length mismatch: got %d, want %d", len(out), len(data)) + } + if string(out) != string(data) { + t.Fatalf("decoded data mismatch") + } +} + +func TestInvalidSymbolLength(t *testing.T) { + const symbolSize = 128 + const dataSize = 1024 + data := genRandomBytes(t, dataSize) + + enc, err := NewEncoder(data, symbolSize) + if err != nil { + t.Fatalf("NewEncoder: %v", err) + } + dec, err := NewDecoder(symbolSize, uint32(len(data))) + if err != nil { + t.Fatalf("NewDecoder: %v", err) + } + + id := uint32(0) + okSym := enc.GenSymbol(id) + if len(okSym) != symbolSize { + t.Fatalf("GenSymbol produced wrong length: %d", len(okSym)) + } + + badShort := okSym[:symbolSize-1] + if _, err := dec.AddSymbol(id, badShort); err == nil { + t.Fatalf("expected error for short symbol, got nil") + } + + badLong := append(okSym, 0xFF) + if _, err := dec.AddSymbol(id, 
badLong); err == nil { + t.Fatalf("expected error for long symbol, got nil") + } +} + +func TestDecode_NotReady(t *testing.T) { + const symbolSize = 64 + const dataSize = 1000 + data := genRandomBytes(t, dataSize) + + enc, err := NewEncoder(data, symbolSize) + if err != nil { + t.Fatalf("NewEncoder: %v", err) + } + dec, err := NewDecoder(symbolSize, uint32(len(data))) + if err != nil { + t.Fatalf("NewDecoder: %v", err) + } + + for id := uint32(0); id < enc.symbolsCount-1; id++ { + if _, err := dec.AddSymbol(id, enc.GenSymbol(id)); err != nil { + t.Fatalf("AddSymbol: %v", err) + } + } + + if ready, _, err := dec.Decode(); err == nil || ready { + t.Fatalf("expected not ready error, got ready=%v err=%v", ready, err) + } +} + +func TestGenSymbol_ModuloBehavior(t *testing.T) { + const symbolSize = 128 + const dataSize = 777 + data := genRandomBytes(t, dataSize) + + enc, err := NewEncoder(data, symbolSize) + if err != nil { + t.Fatalf("NewEncoder: %v", err) + } + + if enc.symbolsCount < 2 { + t.Skip("need at least 2 symbols to test modulo behavior") + } + ref := enc.GenSymbol(1) + same := enc.GenSymbol(enc.symbolsCount + 1) + if string(ref) != string(same) { + t.Fatalf("GenSymbol modulo mismatch: got different payloads for ids 1 and symbolsCount+1") + } +} diff --git a/crc16/crc16.go b/crc16/crc16.go new file mode 100644 index 00000000..d17cb7a8 --- /dev/null +++ b/crc16/crc16.go @@ -0,0 +1,32 @@ +package crc16 + +const ( + poly uint16 = 0x1021 // CRC-16-CCITT + initCRC uint16 = 0x0000 // XMODEM init + xorOut uint16 = 0x0000 +) + +var table [256]uint16 + +func init() { + for i := 0; i < 256; i++ { + var c = uint16(i) << 8 + for j := 0; j < 8; j++ { + if (c & 0x8000) != 0 { + c = (c << 1) ^ poly + } else { + c <<= 1 + } + } + table[i] = c + } +} + +func ChecksumXMODEM(data []byte) uint16 { + c := initCRC + for _, b := range data { + idx := byte((c >> 8) ^ uint16(b)) + c = (c << 8) ^ table[idx] + } + return c ^ xorOut +} diff --git a/crc16/crc16_test.go 
b/crc16/crc16_test.go new file mode 100644 index 00000000..41deec1c --- /dev/null +++ b/crc16/crc16_test.go @@ -0,0 +1,23 @@ +package crc16 + +import ( + "testing" +) + +func TestChecksum(t *testing.T) { + tests := []struct { + input string + expected uint16 + }{ + {"", 0x0000}, + {"123456789", 0x31C3}, + {"hello", 0xC362}, + } + + for _, tt := range tests { + got := ChecksumXMODEM([]byte(tt.input)) + if got != tt.expected { + t.Errorf("ChecksumXMODEM(%q) = 0x%04X, want 0x%04X", tt.input, got, tt.expected) + } + } +} diff --git a/example/block-scan/main.go b/example/block-scan/main.go index 7ddb6d6a..97611bd1 100644 --- a/example/block-scan/main.go +++ b/example/block-scan/main.go @@ -25,7 +25,7 @@ func getNotSeenShards(ctx context.Context, api ton.APIClientWrapped, shard *ton. return nil, fmt.Errorf("get block data: %w", err) } - parents, err := b.BlockInfo.GetParentBlocks() + parents, err := ton.GetParentBlocks(&b.BlockInfo) if err != nil { return nil, fmt.Errorf("get parent blocks (%d:%x:%d): %w", shard.Workchain, uint64(shard.Shard), shard.Shard, err) } diff --git a/example/highload-wallet/main.go b/example/highload-wallet/main.go index 883a9b82..6c9826db 100644 --- a/example/highload-wallet/main.go +++ b/example/highload-wallet/main.go @@ -30,7 +30,7 @@ func main() { words := strings.Split("birth pattern then forest walnut then phrase walnut fan pumpkin pattern then cluster blossom verify then forest velvet pond fiction pattern collect then then", " ") // initialize high-load wallet - w, err := wallet.FromSeed(api, words, wallet.ConfigHighloadV3{ + w, err := wallet.FromSeedWithOptions(api, words, wallet.ConfigHighloadV3{ MessageTTL: 60 * 5, MessageBuilder: func(ctx context.Context, subWalletId uint32) (id uint32, createdAt int64, err error) { // Due to specific of externals emulation on liteserver, diff --git a/example/wallet-cold-alike/main.go b/example/wallet-cold-alike/main.go index e291ec99..54a2972b 100644 --- a/example/wallet-cold-alike/main.go +++ 
b/example/wallet-cold-alike/main.go @@ -31,7 +31,7 @@ func main() { // seed words of account, you can generate them with any wallet or using wallet.NewSeed() method words := strings.Split("birth pattern then forest walnut then phrase walnut fan pumpkin pattern then cluster blossom verify then forest velvet pond fiction pattern collect then then", " ") - w, err := wallet.FromSeed(api, words, wallet.V3) + w, err := wallet.FromSeedWithOptions(api, words, wallet.V3) if err != nil { log.Fatalln("FromSeed err:", err.Error()) return diff --git a/example/wallet-toncenter-api/main.go b/example/wallet-toncenter-api/main.go new file mode 100644 index 00000000..3be848b8 --- /dev/null +++ b/example/wallet-toncenter-api/main.go @@ -0,0 +1,96 @@ +package main + +import ( + "context" + "encoding/hex" + "github.com/xssnick/tonutils-go/address" + "github.com/xssnick/tonutils-go/tlb" + "github.com/xssnick/tonutils-go/ton/wallet" + "github.com/xssnick/tonutils-go/toncenter" + "log" + "strings" + "time" +) + +func main() { + tc := toncenter.New("https://testnet.toncenter.com", + // toncenter.WithAPIKey("YOUR_KEY"), + toncenter.WithTimeout(10*time.Second), + // the free rate limit without an api key is 1 per sec, + // but we set it a bit lower to be sure + toncenter.WithRateLimit(0.85), + ) + + // seed words of an account, you can generate them with any wallet or using wallet.NewSeed() method + words := strings.Split("giraffe soccer exotic sadness angry satoshi promote doctor odor joke rose deal nice inflict engine kiwi wheat eyebrow force envelope obvious tip weasel scan", " ") + + // convert seed to a private key, depending on a type of phrase + // WithBIP39(true) or WithLedger() options could be used + key, err := wallet.SeedToPrivateKeyWithOptions(words) + if err != nil { + log.Fatalln("SeedToPrivateKeyWithOptions err:", err.Error()) + return + } + + // as you can see, we are not passing WithAPI option, because external api will be used + w, err := 
wallet.FromPrivateKeyWithOptions(key, wallet.V4R2) + if err != nil { + log.Fatalln("FromSeed err:", err.Error()) + return + } + + log.Println("wallet address:", w.WalletAddress().Testnet(true)) + + addr := address.MustParseAddr("0QAcsLrH81e_Wh3nrH7Td3rqptMsWNZ5zueGz7I7qtA1qDE_") + + log.Println("sending transaction...") + + // default message ttl is 3 minutes, it is time during which you can send it to blockchain + // if you need to set longer TTL, you could use this method + // w.GetSpec().(*wallet.SpecV4R2).SetMessagesTTL(uint32((10 * time.Minute) / time.Second)) + + // get current wallet seqno from ton center API + w.GetSpec().(*wallet.SpecV4R2).SetSeqnoFetcher(func(ctx context.Context, sub uint32) (uint32, error) { + res, err := tc.V2().GetWalletInformation(ctx, w.WalletAddress()) + if err != nil { + return 0, err + } + return uint32(res.Seqno), nil + }) + + // if our wallet is already deployed, we can skip this step and just use withStateInit = false + contractInfo, err := tc.V2().GetAddressInformation(context.Background(), w.WalletAddress()) + if err != nil { + log.Fatalln("get account info err:", err.Error()) + return + } + + // add state init to deploy, if wallet is not yet deployed + withStateInit := contractInfo.State != "active" + + comment, _ := wallet.CreateCommentCell("Hello from tonutils-go with toncenter api!") + + // if destination wallet is not initialized you should set bounce = true + ext, err := w.PrepareExternalMessageForMany(context.Background(), withStateInit, []*wallet.Message{ + wallet.SimpleMessageAutoBounce(addr, tlb.MustFromTON("0.003"), comment), + }) + if err != nil { + log.Fatalln("BuildTransfer err:", err.Error()) + return + } + + // this hash could be used for transaction discovery in explorers + log.Println("external message hash:", hex.EncodeToString(ext.NormalizedHash())) + + // if you wish to send a message from a diff source, or later, you could serialize it to BoC + msgCell, _ := tlb.ToCell(ext) + + // send message to 
blockchain + if err = tc.V2().SendBoc(context.Background(), msgCell.ToBOC()); err != nil { + log.Fatalln("Failed to send external message:", err.Error()) + return + } + + log.Println("transaction sent, we are not waiting for confirmation") + log.Println("track: https://testnet.tonscan.org/tx/" + hex.EncodeToString(ext.NormalizedHash())) +} diff --git a/example/wallet/main.go b/example/wallet/main.go index 6f3df588..87cdf828 100644 --- a/example/wallet/main.go +++ b/example/wallet/main.go @@ -40,7 +40,7 @@ func main() { // seed words of account, you can generate them with any wallet or using wallet.NewSeed() method words := strings.Split("diet diet attack autumn expose honey skate lounge holiday opinion village priority major enroll romance famous motor pact hello rubber express warfare rose whisper", " ") - w, err := wallet.FromSeed(api, words, wallet.ConfigV5R1Final{ + w, err := wallet.FromSeedWithOptions(api, words, wallet.ConfigV5R1Final{ NetworkGlobalID: wallet.MainnetGlobalID, }) if err != nil { diff --git a/go.mod b/go.mod index a3227b65..0a5e16b6 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,11 @@ module github.com/xssnick/tonutils-go -go 1.23.0 +go 1.24.0 + +toolchain go1.24.3 require ( filippo.io/edwards25519 v1.1.0 - github.com/sigurn/crc16 v0.0.0-20211026045750-20ab5afb07e3 - github.com/xssnick/raptorq v1.0.0 - golang.org/x/crypto v0.39.0 + github.com/xssnick/raptorq v1.3.0 + golang.org/x/crypto v0.42.0 ) diff --git a/go.sum b/go.sum index bda446e7..e8b019ff 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,6 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/sigurn/crc16 v0.0.0-20211026045750-20ab5afb07e3 h1:aQKxg3+2p+IFXXg97McgDGT5zcMrQoi0EICZs8Pgchs= -github.com/sigurn/crc16 v0.0.0-20211026045750-20ab5afb07e3/go.mod h1:9/etS5gpQq9BJsJMWg1wpLbfuSnkm8dPF6FdW2JXVhA= -github.com/xssnick/raptorq v1.0.0 
h1:l77lntIV/W/SV9rZjF4wRpIhikQm8nBHtB3h+qiu2cM= -github.com/xssnick/raptorq v1.0.0/go.mod h1:kgEVVsZv2hP+IeV7C7985KIFsDdvYq2ARW234SBA9Q4= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +github.com/xssnick/raptorq v1.3.0 h1:3GoaySKMg/i8rbjhIuqjxpTTO2l3Gs2/Gh7k3GAjvGo= +github.com/xssnick/raptorq v1.3.0/go.mod h1:kgEVVsZv2hP+IeV7C7985KIFsDdvYq2ARW234SBA9Q4= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= diff --git a/liteclient/config.go b/liteclient/config.go index f934b6a8..d3b0f4fb 100644 --- a/liteclient/config.go +++ b/liteclient/config.go @@ -7,6 +7,7 @@ import ( "net/http" "os" "strconv" + "time" ) var ( @@ -19,6 +20,12 @@ func GetConfigFromUrl(ctx context.Context, url string) (*GlobalConfig, error) { return nil, err } + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 10*time.Second) + defer cancel() + } + req = req.WithContext(ctx) req.Header.Set("Accept", "*/*") diff --git a/liteclient/connection.go b/liteclient/connection.go index e84a6d6c..2d4cd670 100644 --- a/liteclient/connection.go +++ b/liteclient/connection.go @@ -385,7 +385,7 @@ func readSize(conn net.Conn, crypt cipher.Stream) (uint32, error) { sz := binary.LittleEndian.Uint32(size) - if sz > 10<<20 { + if sz > 16<<20 { return 0, fmt.Errorf("too big size of packet: %s", hex.EncodeToString(size)) } @@ -393,7 +393,7 @@ func readSize(conn net.Conn, crypt cipher.Stream) (uint32, error) { } func readData(conn net.Conn, crypt cipher.Stream, sz uint32) ([]byte, error) { - if sz > 8<<20 { + if sz > 16<<20 { return nil, fmt.Errorf("too big packet") } 
diff --git a/liteclient/integration_test.go b/liteclient/integration_test.go index eb1f4d3a..2eef666b 100644 --- a/liteclient/integration_test.go +++ b/liteclient/integration_test.go @@ -7,7 +7,6 @@ import ( "fmt" "github.com/xssnick/tonutils-go/adnl" "github.com/xssnick/tonutils-go/tl" - "github.com/xssnick/tonutils-go/tlb" "reflect" "testing" "time" @@ -22,7 +21,14 @@ func init() { type GetMasterchainInf struct{} -type BlockIDExt = tlb.BlockInfo +type BlockIDExt struct { + Workchain int32 `tl:"int"` + Shard int64 `tl:"long"` + SeqNo uint32 `tl:"int"` + RootHash []byte `tl:"int256"` + FileHash []byte `tl:"int256"` +} + type MasterchainInfo struct { Last *BlockIDExt `tl:"struct"` StateRootHash []byte `tl:"int256"` diff --git a/tl/bytes.go b/tl/bytes.go index 6f2269dd..acc9a997 100644 --- a/tl/bytes.go +++ b/tl/bytes.go @@ -7,39 +7,21 @@ import ( "fmt" ) -func ToBytes(buf []byte) []byte { - var data = make([]byte, 0, ((len(buf)+4)/4+1)*4) - - // store buf length - if len(buf) >= 0xFE { - ln := make([]byte, 4) - binary.LittleEndian.PutUint32(ln, uint32(len(buf)<<8)|0xFE) - data = append(data, ln...) - } else { - data = append(data, byte(len(buf))) - } - - data = append(data, buf...) - - // adjust actual length to fit % 4 = 0 - if round := len(data) % 4; round != 0 { - data = append(data, make([]byte, 4-round)...) 
- } - - return data -} - -func ToBytesToBuffer(buf *bytes.Buffer, data []byte) { +func ToBytesToBuffer(buf *bytes.Buffer, data []byte) error { if len(data) == 0 { // fast path for empty slice buf.Write(make([]byte, 4)) - return + return nil } prevLen := buf.Len() // store buf length if len(data) >= 0xFE { + if len(data) >= 1<<24 { + return fmt.Errorf("too big bytes len, TL bytes array limited by 1<<24") + } + ln := make([]byte, 4) binary.LittleEndian.PutUint32(ln, uint32(len(data)<<8)|0xFE) buf.Write(ln) @@ -55,6 +37,7 @@ func ToBytesToBuffer(buf *bytes.Buffer, data []byte) { buf.WriteByte(0) } } + return nil } func RemapBufferAsSlice(buf *bytes.Buffer, from int) { diff --git a/tl/bytes_test.go b/tl/bytes_test.go index 334e4541..424b3967 100644 --- a/tl/bytes_test.go +++ b/tl/bytes_test.go @@ -7,13 +7,18 @@ import ( func TestTLBytes(t *testing.T) { buf := []byte{0xFF, 0xAA} - if !bytes.Equal(append([]byte{2}, append(buf, 0)...), ToBytes(buf)) { + b := &bytes.Buffer{} + ToBytesToBuffer(b, buf) + + if !bytes.Equal(append([]byte{2}, append(buf, 0)...), b.Bytes()) { t.Fatal("not equal small") return } buf = []byte{0xFF, 0xAA, 0xCC} - if !bytes.Equal(append([]byte{3}, buf...), ToBytes(buf)) { + b.Reset() + ToBytesToBuffer(b, buf) + if !bytes.Equal(append([]byte{3}, buf...), b.Bytes()) { t.Fatal("not equal small 2") return } @@ -23,8 +28,11 @@ func TestTLBytes(t *testing.T) { buf = append(buf, 0xFF) } + b.Reset() + ToBytesToBuffer(b, buf) + // corner case + round to 4 - if !bytes.Equal(append([]byte{0xFE, 0xFE, 0x00, 0x00}, append(buf, 0x00, 0x00)...), ToBytes(buf)) { + if !bytes.Equal(append([]byte{0xFE, 0xFE, 0x00, 0x00}, append(buf, 0x00, 0x00)...), b.Bytes()) { t.Fatal("not equal middle") return } @@ -33,9 +41,16 @@ func TestTLBytes(t *testing.T) { for i := 0; i < 1217; i++ { buf = append(buf, byte(i%256)) } + b.Reset() + ToBytesToBuffer(b, buf) - if !bytes.Equal(append([]byte{0xFE, 0xC1, 0x04, 0x00}, append(buf, 0x00, 0x00, 0x00)...), ToBytes(buf)) { + if 
!bytes.Equal(append([]byte{0xFE, 0xC1, 0x04, 0x00}, append(buf, 0x00, 0x00, 0x00)...), b.Bytes()) { t.Fatal("not equal big") return } + + b.Reset() + if err := ToBytesToBuffer(b, make([]byte, 1<<24)); err == nil { + t.Fatal("should be error") + } } diff --git a/tl/loader.go b/tl/loader.go index 21f11ebd..97e654ee 100644 --- a/tl/loader.go +++ b/tl/loader.go @@ -48,7 +48,7 @@ var Logger = func(a ...any) {} var DefaultSerializeBufferSize = 1024 -func splitAllowed(leftTags []string) []string { +func parseAllowed(leftTags []string) []string { if len(leftTags) == 0 { return nil } @@ -62,9 +62,20 @@ func splitAllowed(leftTags []string) []string { allowed = allowed[1 : len(allowed)-1] list := strings.Split(allowed, ",") - return list + var finalList = make([]string, 0, len(list)) + for _, s := range list { + if vals, ok := _allowedGroup[s]; ok { + // short group alias for many types + finalList = append(finalList, vals...) + } else { + finalList = append(finalList, s) + } + } + + return finalList } +var _allowedGroup = map[string][]string{} var _structInfoTable = map[string]*structInfo{} var _structInfoTableTLNames = map[string]*structInfo{} @@ -120,6 +131,17 @@ func getStructInfoReferenceByShortName(name string) *structInfo { return si } +// RegisterAllowedGroup - register an alias for a group of types to use in tl tags in [brackets] +// In case the name is already registered, types will be appended +func RegisterAllowedGroup(name string, names ...string) { + initMx.Lock() // we lock because init() methods in independent packages can be called in parallel + defer initMx.Unlock() + + grp := _allowedGroup[name] + grp = append(grp, names...) 
+ _allowedGroup[name] = grp +} + func Register(typ any, tl string) uint32 { t := reflect.TypeOf(typ) return RegisterWithFabric(typ, tl, func() reflect.Value { @@ -132,7 +154,7 @@ var initMx sync.Mutex func RegisterWithFabric(typ any, tl string, fab func() reflect.Value) uint32 { initMx.Lock() // we lock because init() methods in independent packages can be called in parallel defer initMx.Unlock() - + t := reflect.TypeOf(typ) si := getStructInfoReference(t) diff --git a/tl/loader_test.go b/tl/loader_test.go index 467c4637..ba791f94 100644 --- a/tl/loader_test.go +++ b/tl/loader_test.go @@ -97,6 +97,7 @@ func init() { Register(TestInner{}, "in 123") // root 777 Register(TestTL{}, "root 222") Register(TestManual{}, "manual val") + Register(AnyBig{}, "anybig") buf := make([]byte, 4) binary.LittleEndian.PutUint32(buf, RegisterWithFabric(Small{}, "small 123", func() reflect.Value { @@ -173,3 +174,21 @@ func BenchmarkParse(b *testing.B) { } _ = tst } + +type AnyBig struct { + Data [][]byte `tl:"vector bytes"` +} + +func BenchmarkSerialize(b *testing.B) { + v := AnyBig{} + for i := 0; i < 100; i++ { + v.Data = append(v.Data, make([]byte, 1<<20)) + } + + for i := 0; i < b.N; i++ { + _, err := Serialize(&v, true) + if err != nil { + panic(err) + } + } +} diff --git a/tl/precompile.go b/tl/precompile.go index 9aa4aa42..fe6d231c 100644 --- a/tl/precompile.go +++ b/tl/precompile.go @@ -29,6 +29,7 @@ const ( _ExecuteTypeLong _ExecuteTypeBool _ExecuteTypeVector + _ExecuteTypeIP6 ) const ( @@ -169,7 +170,7 @@ func compileField(parent reflect.Type, f reflect.StructField, tags []string) *fi } structFlags |= _StructFlagsInterface - list := splitAllowed(tags[2:]) + list := parseAllowed(tags[2:]) for _, s := range list { info.allowedTypes = append(info.allowedTypes, getStructInfoReferenceByShortName(s)) } @@ -196,7 +197,11 @@ func compileField(parent reflect.Type, f reflect.StructField, tags []string) *fi if tags[0] == "int256" { info.typ = _ExecuteTypeInt256 } else if tags[0] == 
"int128" { - info.typ = _ExecuteTypeInt128 + if f.Type == reflect.TypeOf(net.IP{}) { + info.typ = _ExecuteTypeIP6 + } else { + info.typ = _ExecuteTypeInt128 + } } else { info.typ = _ExecuteTypeBytes } @@ -567,6 +572,17 @@ func executeParse(buf []byte, startPtr uintptr, si *structInfo, noCopy bool) ([] bts[0], bts[1], bts[2], bts[3] = buf[3], buf[2], buf[1], buf[0] *(*[]byte)(ptr) = bts buf = buf[4:] + case _ExecuteTypeIP6: + if len(buf) < 16 { + return nil, fmt.Errorf("not enough bytes to parse ip v6 field %s", field.String()) + } + + bts := make([]byte, 16) + for i := 0; i < 16; i++ { + bts[i] = buf[15-i] + } + *(*[]byte)(ptr) = bts + buf = buf[16:] case _ExecuteTypeSingleCell: var bts []byte if bts, buf, err = FromBytes(buf); err != nil { @@ -786,9 +802,13 @@ func executeSerialize(buf *bytes.Buffer, startPtr uintptr, si *structInfo) error binary.LittleEndian.PutUint32(tmp, flags) buf.Write(tmp) case _ExecuteTypeString: - ToBytesToBuffer(buf, []byte(*(*string)(ptr))) + if err := ToBytesToBuffer(buf, []byte(*(*string)(ptr))); err != nil { + return fmt.Errorf("failed to serialize string field %s: %w", field.String(), err) + } case _ExecuteTypeBytes: - ToBytesToBuffer(buf, *(*[]byte)(ptr)) + if err := ToBytesToBuffer(buf, *(*[]byte)(ptr)); err != nil { + return fmt.Errorf("failed to serialize bytes field %s: %w", field.String(), err) + } case _ExecuteTypeInt256: if bts := *(*[]byte)(ptr); len(bts) == 32 { buf.Write(*(*[]byte)(ptr)) @@ -838,30 +858,50 @@ func executeSerialize(buf *bytes.Buffer, startPtr uintptr, si *structInfo) error } else { return fmt.Errorf("invalid ip size %d in field %s", len(ipBytes), field.String()) } + case _ExecuteTypeIP6: + ipBytes := *(*net.IP)(ptr) + if len(ipBytes) == net.IPv4len { + ipBytes = ipBytes.To16() + if ipBytes == nil { + return fmt.Errorf("invalid ip v6 in field %s", field.String()) + } + } + if len(ipBytes) == net.IPv6len { + buf.Write(ipBytes) + } else if len(ipBytes) == 0 { + buf.Write(make([]byte, 16)) + } else { + return 
fmt.Errorf("invalid ip size %d in field %s", len(ipBytes), field.String()) + } case _ExecuteTypeSingleCell: c := *(**cell.Cell)(ptr) if c == nil { if field.meta.(bool) { - ToBytesToBuffer(buf, nil) + _ = ToBytesToBuffer(buf, nil) break } return fmt.Errorf("nil cell is not allowed in field %s", field.String()) } - ToBytesToBuffer(buf, (*(**cell.Cell)(ptr)).ToBOCWithFlags(false)) + + if err := ToBytesToBuffer(buf, (*(**cell.Cell)(ptr)).ToBOCWithFlags(false)); err != nil { + return fmt.Errorf("failed to serialize cell field %s: %w", field.String(), err) + } case _ExecuteTypeSliceCell: c := *(*[]*cell.Cell)(ptr) flag := field.meta.(uint32) num := flag & 0x7FFFFFFF if len(c) == 0 && flag&(1<<31) != 0 { - ToBytesToBuffer(buf, nil) + _ = ToBytesToBuffer(buf, nil) break } if num > 0 && uint32(len(c)) != num { return fmt.Errorf("incorrect cells len %d in field %s", len(c), field.String()) } - ToBytesToBuffer(buf, cell.ToBOCWithFlags(c, false)) + if err := ToBytesToBuffer(buf, cell.ToBOCWithFlags(c, false)); err != nil { + return fmt.Errorf("failed to serialize slice cell field %s: %w", field.String(), err) + } case _ExecuteTypeStruct: info := field.structInfo structFlags := field.meta.(uint32) diff --git a/tlb/account.go b/tlb/account.go index 6f4ec100..7e6f0103 100644 --- a/tlb/account.go +++ b/tlb/account.go @@ -5,7 +5,7 @@ import ( "fmt" "math/big" - "github.com/sigurn/crc16" + "github.com/xssnick/tonutils-go/crc16" "github.com/xssnick/tonutils-go/address" "github.com/xssnick/tonutils-go/tvm/cell" @@ -275,5 +275,5 @@ func (a *Account) HasGetMethod(name string) bool { func MethodNameHash(name string) uint64 { // https://github.com/ton-blockchain/ton/blob/24dc184a2ea67f9c47042b4104bbb4d82289fac1/crypto/smc-envelope/SmartContract.h#L75 - return uint64(crc16.Checksum([]byte(name), crc16.MakeTable(crc16.CRC16_XMODEM))) | 0x10000 + return uint64(crc16.ChecksumXMODEM([]byte(name))) | 0x10000 } diff --git a/tlb/block.go b/tlb/block.go index 66d2e5cc..65a6d5c0 100644 --- 
a/tlb/block.go +++ b/tlb/block.go @@ -1,21 +1,11 @@ package tlb import ( - "bytes" "fmt" "github.com/xssnick/tonutils-go/tvm/cell" ) -// Deprecated: use ton.BlockIDExt -type BlockInfo struct { - Workchain int32 `tl:"int"` - Shard int64 `tl:"long"` - SeqNo uint32 `tl:"int"` - RootHash []byte `tl:"int256"` - FileHash []byte `tl:"int256"` -} - type StateUpdate struct { Old any `tlb:"^ [ShardStateUnsplit,ShardStateSplit]"` New *cell.Cell `tlb:"^"` @@ -118,26 +108,6 @@ type BlkPrevInfo struct { Prev2 *ExtBlkRef } -func (h *BlockInfo) Equals(h2 *BlockInfo) bool { - return h.Shard == h2.Shard && h.SeqNo == h2.SeqNo && h.Workchain == h2.Workchain && - bytes.Equal(h.FileHash, h2.FileHash) && bytes.Equal(h.RootHash, h2.RootHash) -} - -func (h *BlockInfo) Copy() *BlockInfo { - root := make([]byte, len(h.RootHash)) - file := make([]byte, len(h.FileHash)) - copy(root, h.RootHash) - copy(file, h.FileHash) - - return &BlockInfo{ - Workchain: h.Workchain, - Shard: h.Shard, - SeqNo: h.SeqNo, - RootHash: root, - FileHash: file, - } -} - func (h *BlockHeader) LoadFromCell(loader *cell.Slice) error { var infoPart blockInfoPart err := LoadFromCell(&infoPart, loader) @@ -240,66 +210,3 @@ func ConvertShardIdentToShard(si ShardIdent) (workchain int32, shard uint64) { shard |= pow2 return si.WorkchainID, shard } - -func shardChild(shard uint64, left bool) uint64 { - x := lowerBit64(shard) >> 1 - if left { - return shard - x - } - return shard + x -} - -func shardParent(shard uint64) uint64 { - x := lowerBit64(shard) - return (shard - x) | (x << 1) -} - -func lowerBit64(x uint64) uint64 { - return x & bitsNegate64(x) -} - -func bitsNegate64(x uint64) uint64 { - return ^x + 1 -} - -func (h *BlockHeader) GetParentBlocks() ([]*BlockInfo, error) { - var parents []*BlockInfo - workchain, shard := ConvertShardIdentToShard(h.Shard) - - if !h.AfterMerge && !h.AfterSplit { - return []*BlockInfo{{ - Workchain: workchain, - SeqNo: h.PrevRef.Prev1.SeqNo, - RootHash: h.PrevRef.Prev1.RootHash, - 
FileHash: h.PrevRef.Prev1.FileHash, - Shard: int64(shard), - }}, nil - } else if !h.AfterMerge && h.AfterSplit { - return []*BlockInfo{{ - Workchain: workchain, - SeqNo: h.PrevRef.Prev1.SeqNo, - RootHash: h.PrevRef.Prev1.RootHash, - FileHash: h.PrevRef.Prev1.FileHash, - Shard: int64(shardParent(shard)), - }}, nil - } - - if h.PrevRef.Prev2 == nil { - return nil, fmt.Errorf("must be 2 parent blocks after merge") - } - parents = append(parents, &BlockInfo{ - Workchain: workchain, - SeqNo: h.PrevRef.Prev1.SeqNo, - RootHash: h.PrevRef.Prev1.RootHash, - FileHash: h.PrevRef.Prev1.FileHash, - Shard: int64(shardChild(shard, true)), - }) - parents = append(parents, &BlockInfo{ - Workchain: workchain, - SeqNo: h.PrevRef.Prev2.SeqNo, - RootHash: h.PrevRef.Prev2.RootHash, - FileHash: h.PrevRef.Prev2.FileHash, - Shard: int64(shardChild(shard, false)), - }) - return parents, nil -} diff --git a/tlb/block_test.go b/tlb/block_test.go index f23a363f..005eed72 100644 --- a/tlb/block_test.go +++ b/tlb/block_test.go @@ -14,13 +14,6 @@ func TestBlockMaster(t *testing.T) { if err := LoadFromCell(&block, c.BeginParse()); err != nil { t.Fatal(err) } - - parents, err := block.BlockInfo.GetParentBlocks() - if err != nil { - t.Fatal(err) - } - - println(len(parents)) } func TestBlockNotMaster(t *testing.T) { @@ -31,11 +24,4 @@ func TestBlockNotMaster(t *testing.T) { if err := LoadFromCell(&block, c.BeginParse()); err != nil { t.Fatal(err) } - - parents, err := block.BlockInfo.GetParentBlocks() - if err != nil { - t.Fatal(err) - } - - println(len(parents)) } diff --git a/tlb/coins.go b/tlb/coins.go index b85df469..f66d5ffa 100644 --- a/tlb/coins.go +++ b/tlb/coins.go @@ -33,10 +33,17 @@ func (g Coins) String() string { return "0" } - a := g.val.String() + // add sign if negative + sign := "" + val := g.val + if val.Sign() < 0 { + sign = "-" + val = new(big.Int).Abs(val) + } + + a := val.String() if a == "0" { - // process 0 faster and simpler - return a + return "0" } splitter := len(a) - 
g.decimals @@ -59,7 +66,7 @@ func (g Coins) String() string { } } - return a + return sign + a } // Deprecated: use Nano @@ -227,7 +234,7 @@ func (g Coins) MarshalJSON() ([]byte, error) { func (g *Coins) UnmarshalJSON(data []byte) error { if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { - return fmt.Errorf("invalid data") + return fmt.Errorf("invalid coins data") } data = data[1 : len(data)-1] @@ -242,9 +249,9 @@ func (g *Coins) UnmarshalJSON(data []byte) error { return nil } -func (g *Coins) Compare(coins *Coins) int { +func (g Coins) Compare(coins Coins) int { if g.decimals != coins.decimals { - panic("invalid comparsion") + panic("invalid comparison") } return g.Nano().Cmp(coins.Nano()) @@ -252,7 +259,7 @@ func (g *Coins) Compare(coins *Coins) int { // MustAdd adds the provided coins to the current coins and returns the result. // It panics if the operation fails (e.g., due to decimal mismatch or overflow). -func (g *Coins) MustAdd(coins *Coins) *Coins { +func (g Coins) MustAdd(coins Coins) Coins { result, err := g.Add(coins) if err != nil { panic(err) @@ -264,17 +271,17 @@ func (g *Coins) MustAdd(coins *Coins) *Coins { // Add adds the provided coins to the current coins and returns the result. // Returns an error if the coins have different decimal places or if the result // would overflow the maximum allowed value. -func (g *Coins) Add(coins *Coins) (*Coins, error) { +func (g Coins) Add(coins Coins) (Coins, error) { if g.decimals != coins.decimals { - return &Coins{}, errDecimalMismatch + return Coins{}, errDecimalMismatch } - result := &Coins{ + result := Coins{ decimals: g.decimals, val: new(big.Int).Add(g.Nano(), coins.Nano()), } if tooBigForVarUint16(result.val) { - return &Coins{}, errTooBigForVarUint16 + return Coins{}, errTooBigForVarUint16 } return result, nil @@ -282,7 +289,7 @@ func (g *Coins) Add(coins *Coins) (*Coins, error) { // MustSub subtracts the provided coins from the current coins and returns the result. 
// It panics if the operation fails (e.g., due to decimal mismatch or overflow). -func (g *Coins) MustSub(coins *Coins) *Coins { +func (g Coins) MustSub(coins Coins) Coins { result, err := g.Sub(coins) if err != nil { panic(err) @@ -294,17 +301,17 @@ func (g *Coins) MustSub(coins *Coins) *Coins { // Sub subtracts the provided coins from the current coins and returns the result. // Returns an error if the coins have different decimal places or if the result // would overflow the maximum allowed value. -func (g *Coins) Sub(coins *Coins) (*Coins, error) { +func (g Coins) Sub(coins Coins) (Coins, error) { if g.decimals != coins.decimals { - return &Coins{}, errDecimalMismatch + return Coins{}, errDecimalMismatch } - result := &Coins{ + result := Coins{ decimals: g.decimals, val: new(big.Int).Sub(g.Nano(), coins.Nano()), } if tooBigForVarUint16(result.val) { - return &Coins{}, errTooBigForVarUint16 + return Coins{}, errTooBigForVarUint16 } return result, nil @@ -312,7 +319,7 @@ func (g *Coins) Sub(coins *Coins) (*Coins, error) { // MustMul multiplies the current coins by the provided big.Int and returns the result. // It panics if the operation fails (e.g., due to overflow). -func (g *Coins) MustMul(x *big.Int) *Coins { +func (g Coins) MustMul(x *big.Int) Coins { result, err := g.Mul(x) if err != nil { panic(err) @@ -323,13 +330,13 @@ func (g *Coins) MustMul(x *big.Int) *Coins { // Mul multiplies the current coins by the provided big.Int and returns the result. // Returns an error if the result would overflow the maximum allowed value. 
-func (g *Coins) Mul(x *big.Int) (*Coins, error) { - result := &Coins{ +func (g Coins) Mul(x *big.Int) (Coins, error) { + result := Coins{ decimals: g.decimals, val: new(big.Int).Mul(g.val, x), } if tooBigForVarUint16(result.val) { - return &Coins{}, errTooBigForVarUint16 + return Coins{}, errTooBigForVarUint16 } return result, nil @@ -337,7 +344,7 @@ func (g *Coins) Mul(x *big.Int) (*Coins, error) { // MustMulRat multiplies the current coins by the provided big.Rat and returns the result. // It panics if the operation fails (e.g., due to division by zero or overflow). -func (g *Coins) MustMulRat(r *big.Rat) *Coins { +func (g Coins) MustMulRat(r *big.Rat) Coins { result, err := g.MulRat(r) if err != nil { panic(err) @@ -348,13 +355,13 @@ func (g *Coins) MustMulRat(r *big.Rat) *Coins { // MulRat multiplies the current coins by the provided big.Rat and returns the result. // Returns an error if the denominator is zero or if the result would overflow the maximum allowed value. -func (g *Coins) MulRat(r *big.Rat) (*Coins, error) { +func (g Coins) MulRat(r *big.Rat) (Coins, error) { // Get numerator and denominator num := r.Num() den := r.Denom() if den.Sign() == 0 { - return &Coins{}, errDivisionByZero + return Coins{}, errDivisionByZero } // Calculate new nano value: (g.val * num) / den @@ -363,10 +370,10 @@ func (g *Coins) MulRat(r *big.Rat) (*Coins, error) { den, ) if tooBigForVarUint16(newVal) { - return &Coins{}, errTooBigForVarUint16 + return Coins{}, errTooBigForVarUint16 } - return &Coins{ + return Coins{ decimals: g.decimals, val: newVal, }, nil @@ -374,7 +381,7 @@ func (g *Coins) MulRat(r *big.Rat) (*Coins, error) { // MustDiv divides the current coins by the provided big.Int and returns the result. // It panics if the operation fails (e.g., due to division by zero or overflow). 
-func (g *Coins) MustDiv(x *big.Int) *Coins { +func (g Coins) MustDiv(x *big.Int) Coins { result, err := g.Div(x) if err != nil { panic(err) @@ -385,17 +392,17 @@ func (g *Coins) MustDiv(x *big.Int) *Coins { // Div divides the current coins by the provided big.Int and returns the result. // Returns an error if the divisor is zero or if the result would overflow the maximum allowed value. -func (g *Coins) Div(x *big.Int) (*Coins, error) { +func (g Coins) Div(x *big.Int) (Coins, error) { if x.Sign() == 0 { - return &Coins{}, errDivisionByZero + return Coins{}, errDivisionByZero } - result := &Coins{ + result := Coins{ decimals: g.decimals, val: new(big.Int).Div(g.Nano(), x), } if tooBigForVarUint16(result.val) { - return &Coins{}, errTooBigForVarUint16 + return Coins{}, errTooBigForVarUint16 } return result, nil @@ -403,7 +410,7 @@ func (g *Coins) Div(x *big.Int) (*Coins, error) { // MustDivRat divides the current coins by the provided big.Rat and returns the result. // It panics if the operation fails (e.g., due to division by zero or overflow). -func (g *Coins) MustDivRat(r *big.Rat) *Coins { +func (g Coins) MustDivRat(r *big.Rat) Coins { result, err := g.DivRat(r) if err != nil { panic(err) @@ -415,13 +422,13 @@ func (g *Coins) MustDivRat(r *big.Rat) *Coins { // DivRat divides the current coins by the provided big.Rat and returns the result. // This is equivalent to multiplying by the reciprocal of the rational number. // Returns an error if the rational has zero numerator or denominator, or if the result would overflow. 
-func (g *Coins) DivRat(r *big.Rat) (*Coins, error) { +func (g Coins) DivRat(r *big.Rat) (Coins, error) { // Get numerator and denominator num := r.Num() den := r.Denom() if num.Sign() == 0 || den.Sign() == 0 { - return &Coins{}, errDivisionByZero + return Coins{}, errDivisionByZero } // Calculate new nano value: (g.val * den) / num @@ -430,10 +437,10 @@ func (g *Coins) DivRat(r *big.Rat) (*Coins, error) { num, ) if tooBigForVarUint16(newVal) { - return &Coins{}, errTooBigForVarUint16 + return Coins{}, errTooBigForVarUint16 } - return &Coins{ + return Coins{ decimals: g.decimals, val: newVal, }, nil @@ -441,8 +448,8 @@ func (g *Coins) DivRat(r *big.Rat) (*Coins, error) { // Neg returns a new Coins value representing the negation of the original value. // The number of decimals remains the same. -func (g *Coins) Neg() *Coins { - result := &Coins{ +func (g Coins) Neg() Coins { + result := Coins{ decimals: g.decimals, val: new(big.Int).Neg(g.Nano()), } @@ -451,8 +458,8 @@ func (g *Coins) Neg() *Coins { // Abs returns a new Coins value representing the absolute value of the original value. // The number of decimals remains the same. 
-func (g *Coins) Abs() *Coins { - return &Coins{ +func (g Coins) Abs() Coins { + return Coins{ decimals: g.decimals, val: new(big.Int).Abs(g.Nano()), } @@ -460,49 +467,49 @@ func (g *Coins) Abs() *Coins { // GreaterThan returns true if the current coins amount is greater than the // given coins amount -func (g *Coins) GreaterThan(coins *Coins) bool { +func (g Coins) GreaterThan(coins Coins) bool { return g.Compare(coins) > 0 } // GreaterOrEqual returns true if the current coins amount is greater than or // equal to the given coins amount -func (g *Coins) GreaterOrEqual(coins *Coins) bool { +func (g Coins) GreaterOrEqual(coins Coins) bool { return g.Compare(coins) >= 0 } // LessThan returns true if the current coins amount is less than the given coins // amount -func (g *Coins) LessThan(coins *Coins) bool { +func (g Coins) LessThan(coins Coins) bool { return g.Compare(coins) < 0 } // LessOrEqual returns true if the current coins amount is less than or equal to // the given coins amount -func (g *Coins) LessOrEqual(coins *Coins) bool { +func (g Coins) LessOrEqual(coins Coins) bool { return g.Compare(coins) <= 0 } // Equals returns true if the current coins amount is equal to the given coins // amount -func (g *Coins) Equals(coins *Coins) bool { +func (g Coins) Equals(coins Coins) bool { return g.Compare(coins) == 0 } // IsZero returns true if the coins amount is zero -func (g *Coins) IsZero() bool { +func (g Coins) IsZero() bool { return g.Nano().Sign() == 0 } // IsPositive returns true if the coins amount is greater than zero -func (g *Coins) IsPositive() bool { +func (g Coins) IsPositive() bool { return g.Nano().Sign() > 0 } // IsNegative returns true if the coins amount is less than zero -func (g *Coins) IsNegative() bool { +func (g Coins) IsNegative() bool { return g.Nano().Sign() < 0 } -func (g *Coins) Decimals() int { +func (g Coins) Decimals() int { return g.decimals } diff --git a/tlb/coins_test.go b/tlb/coins_test.go index 3017909e..7e8b7d49 100644 --- 
a/tlb/coins_test.go +++ b/tlb/coins_test.go @@ -301,7 +301,7 @@ func TestCoins_GreaterThan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c1 := MustFromTON(tt.coins1) c2 := MustFromTON(tt.coins2) - if got := c1.GreaterThan(&c2); got != tt.want { + if got := c1.GreaterThan(c2); got != tt.want { t.Logf("c1: %s, c2: %s", c1, c2) t.Errorf("GreaterThan() = %v, want %v", got, tt.want) } @@ -335,7 +335,7 @@ func TestCoins_GreaterOrEqual(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c1 := MustFromTON(tt.coins1) c2 := MustFromTON(tt.coins2) - if got := c1.GreaterOrEqual(&c2); got != tt.want { + if got := c1.GreaterOrEqual(c2); got != tt.want { t.Errorf("GreaterOrEqual() = %v, want %v", got, tt.want) } }) @@ -367,7 +367,7 @@ func TestCoins_LessThan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c1 := MustFromTON(tt.coins1) c2 := MustFromTON(tt.coins2) - if got := c1.LessThan(&c2); got != tt.want { + if got := c1.LessThan(c2); got != tt.want { t.Errorf("LessThan() = %v, want %v", got, tt.want) } }) @@ -399,7 +399,7 @@ func TestCoins_LessOrEqual(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c1 := MustFromTON(tt.coins1) c2 := MustFromTON(tt.coins2) - if got := c1.LessOrEqual(&c2); got != tt.want { + if got := c1.LessOrEqual(c2); got != tt.want { t.Errorf("LessOrEqual() = %v, want %v", got, tt.want) } }) @@ -427,7 +427,7 @@ func TestCoins_Equals(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c1 := MustFromTON(tt.coins1) c2 := MustFromTON(tt.coins2) - if got := c1.Equals(&c2); got != tt.want { + if got := c1.Equals(c2); got != tt.want { t.Errorf("Equals() = %v, want %v", got, tt.want) } }) @@ -607,7 +607,7 @@ func TestCoins_Add(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.a.Add(&tt.b) + got, err := tt.a.Add(tt.b) if !errors.Is(err, tt.wantErr) { t.Errorf("Add() error = %v, wantErr %v", err, tt.wantErr) @@ -615,7 +615,7 @@ func TestCoins_Add(t *testing.T) { } if tt.wantErr == nil { - if 
!got.Equals(&tt.want) { + if !got.Equals(tt.want) { t.Errorf("Add() got = %v, want %v", got, tt.want) } if got.Decimals() != tt.want.Decimals() { @@ -676,7 +676,7 @@ func TestCoins_MustAdd(t *testing.T) { }() // We only call MustAdd to check for panics, result checked in TestCoins_Add - _ = tt.a.MustAdd(&tt.b) + _ = tt.a.MustAdd(tt.b) }) } } @@ -796,7 +796,7 @@ func TestCoins_Sub(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tt.a.Sub(&tt.b) + got, err := tt.a.Sub(tt.b) if !errors.Is(err, tt.wantErr) { t.Errorf("Sub() error = %v, wantErr %v", err, tt.wantErr) @@ -865,7 +865,7 @@ func TestCoins_MustSub(t *testing.T) { } }() - _ = tt.a.MustSub(&tt.b) + _ = tt.a.MustSub(tt.b) }) } } @@ -964,7 +964,7 @@ func TestCoins_Mul(t *testing.T) { } if tt.wantErr == nil { - if !got.Equals(&tt.want) { + if !got.Equals(tt.want) { t.Errorf("Mul() got = %v, want %v", got, tt.want) } if got.Decimals() != tt.want.Decimals() { @@ -1106,7 +1106,7 @@ func TestCoins_Div(t *testing.T) { } if tt.wantErr == nil { - if !got.Equals(&tt.want) { + if !got.Equals(tt.want) { t.Errorf("Div() got = %v, want %v", got, tt.want) } if got.Decimals() != tt.want.Decimals() { @@ -1194,7 +1194,7 @@ func TestCoins_Neg(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got := tt.a.Neg() - if !got.Equals(&tt.want) { + if !got.Equals(tt.want) { t.Errorf("Neg() got = %v, want %v", got, tt.want) } if got.Decimals() != tt.want.Decimals() { @@ -1241,7 +1241,7 @@ func TestCoins_Abs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got := tt.a.Abs() - if !got.Equals(&tt.want) { + if !got.Equals(tt.want) { t.Errorf("Abs() got = %v, want %v", got, tt.want) } if got.Decimals() != tt.want.Decimals() { @@ -1357,7 +1357,7 @@ func TestCoins_MulRat(t *testing.T) { } if tt.wantErr == nil { - if !got.Equals(&tt.want) { + if !got.Equals(tt.want) { t.Errorf("MulRat() got = %v (%s), want %v (%s)", got, got.Nano().String(), tt.want, tt.want.Nano().String()) } if got.Decimals() != 
tt.want.Decimals() { @@ -1529,7 +1529,7 @@ func TestCoins_DivRat(t *testing.T) { } if tt.wantErr == nil { - if !got.Equals(&tt.want) { + if !got.Equals(tt.want) { t.Errorf("DivRat() got = %v (%s), want %v (%s)", got, got.Nano().String(), tt.want, tt.want.Nano().String()) } if got.Decimals() != tt.want.Decimals() { @@ -1599,3 +1599,32 @@ func TestCoins_MustDivRat(t *testing.T) { }) } } + +func TestCoins_NegStr(t *testing.T) { + c, err := FromDecimal("-1.22", 9) + if err != nil { + t.Fatal(err) + } + + if c.String() != "-1.22" { + t.Fatalf("NegStr() got = %s, want %s", c.String(), "-1.22") + } + + c, err = FromDecimal("-0.0011", 9) + if err != nil { + t.Fatal(err) + } + + if c.String() != "-0.0011" { + t.Fatalf("NegStr() got = %s, want %s", c.String(), "-0.0011") + } + + c, err = FromDecimal("-0.01111", 2) + if err != nil { + t.Fatal(err) + } + + if c.String() != "-0.01" { + t.Fatalf("NegStr() got = %s, want %s", c.String(), "-0.01") + } +} diff --git a/tlb/shard.go b/tlb/shard.go index dad240e2..fe4b8ef8 100644 --- a/tlb/shard.go +++ b/tlb/shard.go @@ -151,6 +151,27 @@ type ShardDescB struct { FundsCreated CurrencyCollection `tlb:"."` } +func ShardChild(shard uint64, left bool) uint64 { + x := lowerBit64(shard) >> 1 + if left { + return shard - x + } + return shard + x +} + +func ShardParent(shard uint64) uint64 { + x := lowerBit64(shard) + return (shard - x) | (x << 1) +} + +func lowerBit64(x uint64) uint64 { + return x & bitsNegate64(x) +} + +func bitsNegate64(x uint64) uint64 { + return ^x + 1 +} + func (s ShardID) IsSibling(with ShardID) bool { return (s^with) != 0 && ((s ^ with) == ((s & ShardID(bitsNegate64(uint64(s)))) << 1)) } diff --git a/tlb/stack.go b/tlb/stack.go index 2567240a..8f174019 100644 --- a/tlb/stack.go +++ b/tlb/stack.go @@ -3,6 +3,7 @@ package tlb import ( "errors" "fmt" + "github.com/xssnick/tonutils-go/address" "math/big" "reflect" @@ -120,6 +121,16 @@ func SerializeStackValue(b *cell.Builder, val any) error { } } + // address is often used 
as a value, but it was not obvious + // that it should be a slice, so we convert it internally + if addr, ok := val.(*address.Address); ok { + ab := cell.BeginCell() + if err := ab.StoreAddr(addr); err != nil { + return fmt.Errorf("failed to store address: %w", err) + } + val = ab.ToSlice() + } + switch v := val.(type) { case nil: b.MustStoreUInt(0x00, 8) diff --git a/ton/block.go b/ton/block.go index 5fe1e155..112ba863 100644 --- a/ton/block.go +++ b/ton/block.go @@ -140,8 +140,33 @@ type Signature struct { type Object struct{} type True struct{} -// TODO: will be moved here in the next version -type BlockIDExt = tlb.BlockInfo +type BlockIDExt struct { + Workchain int32 `tl:"int"` + Shard int64 `tl:"long"` + SeqNo uint32 `tl:"int"` + RootHash []byte `tl:"int256"` + FileHash []byte `tl:"int256"` +} + +func (h *BlockIDExt) Equals(h2 *BlockIDExt) bool { + return h.Shard == h2.Shard && h.SeqNo == h2.SeqNo && h.Workchain == h2.Workchain && + bytes.Equal(h.FileHash, h2.FileHash) && bytes.Equal(h.RootHash, h2.RootHash) +} + +func (h *BlockIDExt) Copy() *BlockIDExt { + root := make([]byte, len(h.RootHash)) + file := make([]byte, len(h.FileHash)) + copy(root, h.RootHash) + copy(file, h.FileHash) + + return &BlockIDExt{ + Workchain: h.Workchain, + Shard: h.Shard, + SeqNo: h.SeqNo, + RootHash: root, + FileHash: file, + } +} type MasterchainInfo struct { Last *BlockIDExt `tl:"struct"` @@ -324,7 +349,7 @@ func (c *APIClient) CurrentMasterchainInfo(ctx context.Context) (_ *BlockIDExt, master.mx.Lock() defer master.mx.Unlock() - if time.Now().After(master.updatedAt.Add(5 * time.Second)) { + if time.Now().After(master.updatedAt.Add(1 * time.Second)) { ctx = c.client.StickyContext(ctx) var block *BlockIDExt @@ -409,6 +434,20 @@ func (c *APIClient) LookupBlock(ctx context.Context, workchain int32, shard int6 // GetBlockData - get block detailed information func (c *APIClient) GetBlockData(ctx context.Context, block *BlockIDExt) (*tlb.Block, error) { + cl, err := 
c.GetBlockDataAsCell(ctx, block) + if err != nil { + return nil, err + } + + var bData tlb.Block + if err = tlb.LoadFromCell(&bData, cl.BeginParse()); err != nil { + return nil, fmt.Errorf("failed to parse block data: %w", err) + } + return &bData, nil +} + +// GetBlockDataAsCell - get block detailed information as a cell +func (c *APIClient) GetBlockDataAsCell(ctx context.Context, block *BlockIDExt) (*cell.Cell, error) { var resp tl.Serializable err := c.client.QueryLiteserver(ctx, GetBlockData{ID: block}, &resp) if err != nil { @@ -426,18 +465,14 @@ func (c *APIClient) GetBlockData(ctx context.Context, block *BlockIDExt) (*tlb.B return nil, fmt.Errorf("incorrect block") } - var bData tlb.Block - if err = tlb.LoadFromCell(&bData, pl.BeginParse()); err != nil { - return nil, fmt.Errorf("failed to parse block data: %w", err) - } - return &bData, nil + return pl, nil case LSError: return nil, t } return nil, errUnexpectedResponse(resp) } -// GetBlockTransactionsV2 - list of block transactions +// GetBlockTransactionsV2 - a list of block transactions func (c *APIClient) GetBlockTransactionsV2(ctx context.Context, block *BlockIDExt, count uint32, after ...*TransactionID3) ([]TransactionShortInfo, bool, error) { withAfter := uint32(0) var afterTx *TransactionID3 @@ -667,3 +702,45 @@ func (c *APIClient) GetBlockProof(ctx context.Context, known, target *BlockIDExt } return nil, errUnexpectedResponse(resp) } + +func GetParentBlocks(h *tlb.BlockHeader) ([]*BlockIDExt, error) { + var parents []*BlockIDExt + workchain, shard := tlb.ConvertShardIdentToShard(h.Shard) + + if !h.AfterMerge && !h.AfterSplit { + return []*BlockIDExt{{ + Workchain: workchain, + SeqNo: h.PrevRef.Prev1.SeqNo, + RootHash: h.PrevRef.Prev1.RootHash, + FileHash: h.PrevRef.Prev1.FileHash, + Shard: int64(shard), + }}, nil + } else if !h.AfterMerge && h.AfterSplit { + return []*BlockIDExt{{ + Workchain: workchain, + SeqNo: h.PrevRef.Prev1.SeqNo, + RootHash: h.PrevRef.Prev1.RootHash, + FileHash: 
h.PrevRef.Prev1.FileHash, + Shard: int64(tlb.ShardParent(shard)), + }}, nil + } + + if h.PrevRef.Prev2 == nil { + return nil, fmt.Errorf("must be 2 parent blocks after merge") + } + parents = append(parents, &BlockIDExt{ + Workchain: workchain, + SeqNo: h.PrevRef.Prev1.SeqNo, + RootHash: h.PrevRef.Prev1.RootHash, + FileHash: h.PrevRef.Prev1.FileHash, + Shard: int64(tlb.ShardChild(shard, true)), + }) + parents = append(parents, &BlockIDExt{ + Workchain: workchain, + SeqNo: h.PrevRef.Prev2.SeqNo, + RootHash: h.PrevRef.Prev2.RootHash, + FileHash: h.PrevRef.Prev2.FileHash, + Shard: int64(tlb.ShardChild(shard, false)), + }) + return parents, nil +} diff --git a/ton/block_test.go b/ton/block_test.go index cf34d15d..a3287c6a 100644 --- a/ton/block_test.go +++ b/ton/block_test.go @@ -2,6 +2,7 @@ package ton import ( "encoding/hex" + "github.com/xssnick/tonutils-go/tlb" "github.com/xssnick/tonutils-go/tvm/cell" "testing" ) @@ -35,3 +36,37 @@ func TestLoadShardsFromHashes(t *testing.T) { t.Fatal("should be err") } } + +func TestBlockMaster(t *testing.T) { + boc, _ := 
hex.DecodeString("b5ee9c72e2020152000100002a490000002400cc00ea0180026202fe033003520361037a03940404047404c0056805a8069a06b4075c079c0806087608c30a160a3a0a5e0b0a0b2a0b4a0b6a0b880ba60bc20bde0bfa0c160c320cd80d5c0d800da00dec0e380e580e780e980eb60ed60ef60f160f360f560f76102010a8110e119011ae11cc11ea120612aa132a13761442148f14ae154415621580159e15bc15da15f81616163416521670168e16ac16ca16d816e616f417021710171e172c173a1748175617641772178017cc17da17e817f6180418121820182e187a1888189618a418b218c018ce18dc18ea18f8190619521976199a19e71a921ab21ad21b1f1b6b1b8a1ba81bf51c411c5e1c7a1cc71d131d2e1d4a1d971de31dfe1e4b1e661f0c1f591fdc2029207b20c721122132217f21cb21ea220a22572276229422e12300234d236c238c23d923f82445249124b024fd251c25c62613269a26e7274c279927e5286628b328d0291d293a298729a429f12a3d2a582afc2b492bc82c152c612cad2ccc2cda2d272d442d912dae2dfb2e182e652e822ecf2eec2f392f562fa32fc0300d302a3077309430e130fe314b316831b531d2321f323c325a3308335533a1343634443491349e34eb34f835453552356035ad35f936063653366036ad36ba370737143722376f377c37c937d6382338d838e6399a3a4e3a5c3aa93ab63ac43ad23b1f3b2c3b793b863bd33be03c2d3c3a3c873c943ce13cee3d3b3d483d953da43df13e683f1c3f693f763f843fd1401d402a40774084409240df412b41384185419241df41ec41fa42ae43624416442244284476449e44f244ff4542454c463046484656466546744684472847d0487848844890491649d64a5c4a6e4b124bd34bdc4c624c7e4d2f4dd04ddc4de84e6e4f2e4fb44fc65086510c511e51c3523152f052f7537d538e54325493041011ef55aaffffff11000100020003000401a09bc7a9870000000004010173ed450000000100ffffffff0000000000000000634e93ea00001d3677b8338000001d3677b83384955d862e00058edb0173ed410173bfbec400000003000000000000002e00050211b8e48dfb4a0eebb004000600070a8a040a13051bcbbbdeccd56f979164b7da81b8e49732e7215334e2b8ce57c41888d03190bd932f59e8bcca9b92cd1032c316407ca6099409a8aedf4146f39e95ffec016e016e000b000c14892736daee89910b52d7041a889bf97c864cfc84eeafba291a1b5b2e931cc1b5e800084a33f6fd0be55a2d75c3eae367b5ba338705f0319041b70e23a5c9b65374cf2739898e58f78b372f9292751451def1be4dc7cf494ef17470574d85c253ef77746b8ea127c00123
012401250126009800001d3677a8f1440173ed443de180887d5f5a84d44bd19c87cbb664b0561d5eb81da88c5782b0e36e9a07e4d7fd7d801561f54bffc0cb5c4ec4e855deeeeb6fdf26d4c99a086ffafb93580a022581fa7454b05a2ea2ac0fd3a2a5d348d295400800080008001d43b9aca00250775d8011954fc400080201200009000a0015be000003bcb355ab466ad00015bfffffffbcbd0efda563d0245b9023afe2ffffff1100ffffffff00000000000000000173ed4400000001634e93e700001d3677a8f1440173ed4160000d000e000f0010245b9023afe2ffffff1100ffffffff00000000000000000173ed4500000001634e93ea00001d3677b833840173ed416000110012001300142848010124871f46ee0eb1ae00a27d5c29f6cdbcc378c1f4f1380805ff2297c9ed9fcf2200013213a09776db739953220712f110cafb8c5d8dc0fab70e9391a4894fc7cc8706b210f3282d0d6691f0235fdd6b31911b4bd49619c99ab3dbcb80305cea71d75d2eb0016d00128207e9d152c168ba8ab00018009122330000000000000000ffffffffffffffff81fa7454b05a2ea2a8280091001634558d88cb7e0929c9a44ffc1cf3c5a230c0db9b7ba3f7e489ee00f8289579a5c967d13c5fdbab5e261b0b55885aa0a63abbc4487521903d912f3e35c296ad0eb23d001b0010cc26aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac23305e8350c57b37e003f008e004000410111000000000000000050001532139abefb5447ac011734d52324dd76a05aff0775b45c928601222624dbc2141bf2fa4c3e38ec56db5fc900c7c628d963f6fd5f016fdbf141f3f5ae91c3314e7b7f016d00128207e9d152e9a4694ab00072009122330000000000000000ffffffffffffffff81fa7454ba691a52a828009100162455cc26aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac23305e83a13cd8b7e0128008e00170041006bb0400000000000000000b9f6a280000e9b3bd478a1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc028480101de5adf45c03a745fc9d3a418d9e2ba096db9c1aaa77bc0898b6d9ebb611d2b40000532bfe179ee8495cd144a2f6d30950db2d48088fac5f9a778519dc0114219bf54cead2e7212dcdf398af721e9e425782d575b6c4026b7ec52ea1d3fe9dbb2bbe2aecf001a0010000100f59f3900058edb600003a6cef51e28880000e99c6da994200b9dfdf25ea78c41c94e4584ff2623b917b789f5d0e6a859861410542cb94e76fdc6efde77dc19b6aa8de0d4ad05651468c38cb9f64e1f8cf1351de6d5a7d98d56d6ded0be00bb00bc23130103f4e8a960
b45d4558001900740091231301022a87a0b197c88778001a001b0091331367865aed64db08164a3138d816a8cacbb8b8fa46bc1fd1023ef5bd4e8fb6299ddc6201a0bfe76f22f36ca8ad0c54aa645c5739752b0a53bac3e57e9dda18febd0027000f01015ec2a32762fd21d800270028009122130100cbc4fd8a34cb65a8001c007822130100596b57d9c1932d880079001d221301003f0bad3989c46848001e007c221100e0b187aea6583a68007d001f221100e0a7528c0ef9512800200080220f00c141a6498c4d0800810021220f00c02225548664a800220084220f00c0221de12e910800850023220f4030085e7768002a00870024220f00c02170f2272c280025008a219dbceaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa818042d76318e19f365e6f7e0780eb2bc9f0c382940ef38b61bb6257ad617eb36fe8c9f56964f2800003a6cef51e28700262277cff55555555555555555555555555555555555555555555555555555555555555554085ac1288e0000000000000074d9dea3c5118042d76318e195d0008c008d2313010068a1a14c598fee380029002a009122130100f62101db096d33a80092002b28480101357b3e386bb95837e17d8fc7dd37f292efdc3301f6e109d8b33eb8a02afbb95d002228480101df3229c929cdae91378fd16242bda5a97246c54da4e9700d7427829825ee7b5d001922130100dd08f96dc461bcc8002c009522130100a6df074312541a08002d002e22130100902873121142a0c80098002f221100f6b6943101117948003900ae2213010090175c7b3161aee8009a0030221301008fb45fdc97d252080031009d221301008f689e5fb80803e80032009f221301008f677b3e09283ac800a00033221301008f677a70024cc70800a20034221301008f6779cbfeb8f188003500a521a1bcd99999999999999999999999999999999999999999999999999999999999982011ecef393af6d61933fe8eada66c79771a5de359b379b70bfe4414022dee90877585c9818f39dda000003a6cef51e2850036227bcff33333333333333333333333333333333333333333333333333333333333333334081ac1664bc000000000000074d9dea3c50e011ecef393af6d6196d000a700372355ec039e4242ff8cc69bf4260c44ddc7b820f838fa85ad1828d2b83ace409d6c02a3b89a505ac592d94a7c4d00a900aa00382179a0634dfa13634f7a130000800006226ee3dc107c1c7d42d68c14695c1d67204eb60151dc4d282d62c96ca53e26c0100ee542c8b882e30ec339334e5ca000ac221100f6a2a63eab3d4e48003a00b0221100ea5905b0b329bd88003b00b2221100ea58fcd0996ec14800b3003c220f00c03598
7df0cb08003d00b6219bbd62f8f7bea30f8ab5e9f16c3fb8642b118f56ed1bdc49600dbe5220c8b1af9e040c474f803d1a0544cba813425adf3253dd3727a789c9e418b5e788a4cab5df805caee92b00000e9b3bd478a1c0003e236fcff34517c7bdf5187c55af4f8b61fdc321588c7ab768dee24b006df29106458d7cf21881f48000000000000074d9dea3c5110311d3e017f000b800b900ba28480101db29f7a5808e1a673feb2258d777f3005642991b8025b1f92bcd7c498a8bd8ed000222bf000100f59f3900058edb600003a6cef335e0880000e99c6da994200b9dfdf25ea78c41c94e4584ff2623b917b789f5d0e6a859861410542cb94e76fdc6efde77dc19b6aa8de0d4ad05651468c38cb9f64e1f8cf1351de6d5a7d98d56d6ded0be0042004328480101b20e36a3b36a4cdee601106c642e90718b0a58daf200753dbb3189f956b494b600012213c3c000074d9de66bc12000bd004432014645ed4db913f636932b4bebb489b51112e1a415136d57d6c6735ffc4bd556606e33f960111aa97c043f6040c47d1298da52d6a46a0378304ba87f61f3dee9db0010000c20005100522211480000e9b3bccd782400bf00452211200003a6cef335e09000c100462211200003a6cef335e09000c3004722116200003a6cef335e0900c500482211200003a6cef335e09000c700492211200003a6cef335e09000c9004a2211200003a6cef335e09000cb004b2211000003a6cef335e09000cd004c2211400000e9b3bccd782400cf004d2211000003a6cef335e09000d1004e2211400000e9b3bccd782400d3004f2211400000e9b3bccd782400d500502211d000003a6cef335e0900d900da220120005300f822012000dd006722012000540055220120005600fc220120010f005f220120005700fe2201200058010022012000590102220120005a0104220120005b0106220120005c0108220120005d010a220120005e010c28480101f25a1e1d7f11115186543ff6eb95e3d9b98f71d2c959af6b0dad6b63cd1e6d6900012201200060011222012001130061220120011500622201200063011822012001190064220120011b0065220120011d006628480101a96f5d75bc79b8d1640e680704965baaa2245e4b3a5ad8e980ef5a3568409418000222012000df006822012000e10069220120006a00e422012000e5006b22012000e7006c22012000e9006d22012000eb006e220120006f00ee220120007000f0220120007100f2284801016f2780ba9d3cdce8eee34a23d893d90800da0ac1be8a973c513909136b7f636b000223130103f4e8a974d234a558007300740091231301022a87a0c5b59fe77800750076009128480101827773c365eccfd6cb46a3f783a09f1aba77
ce1a0a4d62048569e9b845e955f8016b3313e844a4da57b7cf2b0b52b004cc5886c966478e37012ee2d9b473bf083e1e3072beee68024dd8cc7dffdfde9e114e4818dc10824b446d661453963fa117c1fcbf0027000f01015ec2a33b80d481d8008f0090009122130100cbc4fd8a34cb65a80077007822130100596b57d9c1932d880079007a28480101cf20bddca78403c3e40e2ab1b3eeb526c425b5efb531e6c5bd3d52019c25125c002628480101ff7081e66c7f0d6e868021316b0189b9e67b61284c176cd74f78fa6baa18a025001a221301003f0bad3989c46848007b007c221100e0b187aea6583a68007d007e284801011d818de56750d053b2a227f0da85ba37fb1869d0c774629d04e2668e1204c0a3001b28480101174c3878604468b08b7b76f5349c41201ae28b81ca1d94794ab11a692e4668250018221100e0a7528c0ef95128007f0080220f00c141a6498c4d080081008228480101e00721ae4b2be2ed708fa68e6b7bec2759a107504812069b3b8aa9acea346c66001528480101747c06e45f53ca1dc7d23f39db07d12f2e67fa595a36fa41d26683ab42d1a3040014220f00c02225548664a800830084220f00c0221de12e91080085008628480101eb38ef90c590bedb3ce31140d2d4176d43db6b7aab35df685afc4ccf2a383209000b2848010179a2e20b8a926ab2fe83108ff00f2fbced9958047008e5cb5fdf8c798aab63850010220f4030085e7768002a0087008828480101a248b81f22333cc28f6b6744e4298aefcd9b6f2dc5d7c99e1da1b28c37f3aa0c0007220f00c02170f2272c280089008a219dbceaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa818042d76318e1805a8c67376726b2dcf563d47e9c2ed6fa8fd993942535d6c8ed758902593e99400003a6cef706707008b284801010143b3d2dd671b2559543155e003f847022e510b3a57afabbca05d4069c327ef000d2277cff55555555555555555555555555555555555555555555555555555555555555554085ac1288e0000000000000074d9dee0ce118042d76318e195d0008c008d2848010164a43970f2007a1da6d6fc81773cc095d1cc270e81359e471f3b03469abeb7b5000c214900000027cbb9d1062954439a83a91f27835fb9d2e3e798910356650c3c493c946234646840008e28480101374e198a900e08edc634a5f2ad73e388b0a3019d24269fae8046024e437476b1001028480101903aa268fecbed38822a8972ba42eadb53c0972f11b8486f1534210245db4898002322130100f62101ef274493a80092009328480101a5a7d24057d8643b2527709d986cda3846adcb3eddc32d28ec21f69e17dbaaef0001284801012bd772e408a34578
028922281a3e5b5384970a6a6dd741b1cfa3b80a3e5ec57d002322130100dd08f981e2391cc80094009522130100a6df0757302b7a080096009728480101b90ed7fc04a4971294b12a078ec8189e8fdba184de6e23043922a774ae403ee2002422130100902873262f1a00c800980099221100f6b694310111794800ad00ae28480101eda54e0b0237690499c3e159ab800469fdbcb3c162d42181c2c298acd4e98f3100152213010090175c8f4f390ee8009a009b28480101cc6ead611f9fa7c0598d8f88d658fe0b91f5f9c9635c872154234c16c722970c0014221301008fb45ff0b5a9b208009c009d221301008f689e73d5df63e8009e009f28480101c7c146bea2ced23475861d11146c0560a46c3d243563fda0e32bf8c34229d2670013221301008f677b5226ff9ac800a000a128480101ef1aa8b2068cf6a8eadef8197235a5d5976865a32a3ad1fe80db069ddb8cc2fe001128480101c2ef35325f62d0b4cc17d1f5d083894100c3c478504d70b6eb8d3cf26e604ff40011221301008f677a842024270800a200a3284801018a51fe69422dbf7e028fb1dcac5a62064eefeb4c080793e78a24ef22334b307c0010221301008f6779e01c90518800a400a521a1bcd99999999999999999999999999999999999999999999999999999999999982011ecef3bbeb1c21823653419f6ebabf10f7978dae7e33a12d3bbe9815917a62eb4fe902f2a96e88600003a6cef70670500a62848010150725eee52e86432f846698a08ac153a67bc9ad9c160130af907c3bef05f29480007227bcff33333333333333333333333333333333333333333333333333333333333333334081ac1664bc000000000000074d9dee0ce0e011ecef3bbeb1c2196d000a700a8284801016217f872c99fafcb870f2c11a362f59339be95095f70d00b9cff2f6dcd69d3dd000e2355ec039e4242ff8cc69bf4260c44ddc7b820f838fa85ad1828d2b83ace409d6c02a3b89a505ac592d94a7c4d00a900aa00ab28480101ff06225996392d9e78d92fef981828f3459892841111b2d352901236d506cb65000b28480101336df3bd068890e3f26c1a8f5e77c4bf7cc3c81fc88006ab614b6db43647262600072179a0634dfa13634f7a130000800006226ee3dc107c1c7d42d68c14695c1d67204eb60151dc4d282d62c96ca53e26c0100ee542c8b882e30ec389aaabdca000ac28480101b8ad45439ed0f9f1ffb12362a0c0a6f522734feed11dda077d5f6067f1305170000b221100f6a2a63eab3d4e4800af00b028480101e2a96bbff9be849635722263833d77a90f0a832b410f8b73bca56041fd7e21970016221100ea5905b0b329bd8800b100b22848010130dd0d5ef5796dc4c101fbf5b4b083599e50
9d0f738b07a8dbfad6b5ae53aecb0012221100ea58fcd0996ec14800b300b42848010130219e3c8c788af6da8a296da6f3e9925c909eed9821a0ae1911c38f56f7b37e000b28480101e2bc337ece7f3af5171f3265f44c612fc2fcba87f4b4563dc7fdc3285dd6a44d0008220f00c035987df0cb0800b500b6219bbd62f8f7bea30f8ab5e9f16c3fb8642b118f56ed1bdc49600dbe5220c8b1af9e040c474f804c5ac6247ef1e5d11d080c3d8b21135b54598a72e11fbc6ebe1fa0c4b2a7df0a00000e9b3bdc19c1c000b72848010118dd0a8040c21a2cfb6c0acf4ad636dc67ef3ab0a3e102f1b43ad500c55728d00007236fcff34517c7bdf5187c55af4f8b61fdc321588c7ab768dee24b006df29106458d7cf21881f48000000000000074d9dee0ce110311d3e017f000b800b900ba284801017269fb9feb45d719ebdbc3b0816b987bab06f43378dc84dc84d55727905482140002004811fd096c000000000000000000000000000000000000000000000000000000000000000028480101986c49971b96062e1fba4410e27249c8d73b0a9380f7ffd44640167e68b215e800032213c3c000074d9dea3c512000bd00be22012000db00dc28480101258d602eaa21d621634dcf86692aeae308ff3cf888f3edafc6a5b21848d732f900182211480000e9b3bd478a2400bf00c0284801014b01ebcf5425735461aa8b83bae89e70fa21e95d2ee85e57b05dad26c1d6d53000162211200003a6cef51e289000c100c22848010165b0a85a0fdea0c76a2a98445623ea62427099a6318624794dea416f1bdc6f5c00152211200003a6cef51e289000c300c428480101b5b64686c719580155341cb7347af0405dec7158c283ad30833b07325bdc48a5001422116200003a6cef51e28900c500c628480101fde4f74a9866e3de066d6d27e3b1fe107053ecce8b54d8b05ebf4a3b0789c26b00112211200003a6cef51e289000c700c828480101f7a4391731a8136b142d214311bd2f8c162938f27185d22de576a045a13b1e1600102211200003a6cef51e289000c900ca2848010187c846be2bc06a266ae017ae9a13c66cf156125edd95b8bd4f6cfe3c903e3b35000f2211200003a6cef51e289000cb00cc2848010122da148fcc6a6a317ae3c41ee888034019cbfa89e57f306b85601dd2045d6daa000e2211000003a6cef51e289000cd00ce2848010191c44865f6767ab41750fbf5117df2d8be3110925c7993aa2e03780673c31f32000d2211400000e9b3bd478a2400cf00d0284801016d16afa0d70d41df6abe49636527c0b566bd3b722b731eba03433d7efbcb3908000b2211000003a6cef51e289000d100d228480101d744ca7d3ce6fe4538b3fa6a138971ca129c227d8a6736a9
cd1d33c2f1fd06cc000a2211400000e9b3bd478a2400d300d42848010175d211346d824c33aff56800c12e0b320854590aadfd85e3f909502cdb6ec3c100082211400000e9b3bd478a2400d500d628480101322f03bbddf42b900d602199315f5d4befa1a9282a2a6c845f3db6ccd2b6bfc000062211cc00003a6cef51e28900d700d82211000003a6cef335e09000d900da00a9d0000074d9dea3c51000003a6cef51e28802e7da887bc30110fabeb509a897a3390f976cc960ac3abd703b5118af0561c6dd340fc9affafb002ac3ea97ff8196b89d89d0abbdddd6dfbe4da9933410dff5f726b01528480101523e62a3a95932c2a65f2314a8a818f82f48644967cc31dcfda9954109d8b55100012848010177c2748c31a7f78c56862aa9d06df60981de7aaa59e67d4d0360a2903384fe1500013201032a5ac73da06a6b989d158bec539003d36dc087d663eda6337be5667c284f16310ee22bacedde5f1c215edbbbdf7a1c20c98ec248b7893266ebfceeb41817bd000f000c2000f700f822012000dd00de284801019deed5e9cd5995ad6c97a06276c939029a1d05a6de03b6c724a4b5567e9adb7a000e22012000df00e0284801016bc4ad2e5c909f6f452be243edc65694f7e6db5f2fc615f69756954a60a563a2000c22012000e100e2284801016925c827cdb72656785a860c0ed1b94c1ff9f0614b9e2ed1b0aa1ee8fbb395aa000c22012000e300e422012000e500e62848010197d9c97586b5cf9a93f5077cf1e13c91f7a4d5b240601e4d08030ab62cd17707000b28480101f613c63e75ce90bdb3aadf01297ba9a958588392473ea542ef8654f281d2854f000922012000e700e828480101d83f99b6b2deca33e45337ea0fa4788a5590c2a9f88654c24c1e4b5282ec7787000822012000e900ea28480101497deb7f82cc061521c9f6bf58ddd3043ecb1dbaea13352ecb73bb53236a9dd8000622012000eb00ec28480101e86bec3c2e5a0c5b9bad30e9b0efd5c74409fece4efd571f8fe02eccbbd0af1a000422012000ed00ee22012000ef00f02848010178a2f12e152f91343bff8aeda8ca7bab1039578fb6b03832c150f22786d0500c000422012000f100f228480101b26a0cc496805853f303d8a00ae9fc7f7b20dc7cab6d1d1c21f5b86469874a84000202012000f300f428480101a31f27b17ffa79bcaf0e47f55dffa054f825e019e447026255e7e1a8d7488701000200b1bcd91dbefdb40075ad92878e330bb79115bcfc28f3c5b9833df391ce8138514a3199fcd1800000000000006500000002b0d782ed8000003f2a3414d8b199f72d0000000000000040800000038a0ed0708000002d14497b814002014800f500f600afbc6827bcf8957c10b8a5694ecd7f
0dd41e6a2cd906c77e5340983b618fb6fb0800000000000000000000000000000000000000000000000000000000c695ef7e000000000000007200000006efab3f64000000450742496300afbc66e5f2524ea28a3bde37c9b8f9e929de2e8e0ae9b0b84a7deeee8d71d424e8c69d27d400000000000000e2000000093197d6c2000000a8632502f4c69d1ba4000000000000008a000000116e3e81da0000006af0f2148122012000f900fa28480101912d60694234d59e4645f5d2ebd90e081979a3f6eaf4124bec3980e4547a5094000e22012000fb00fc220120010f011022012000fd00fe28480101348a81067d100edaf90feeb18db50c3c315a07c6c0944b52368c30e76a6f40df000b22012000ff0100284801018540d2166efad6f7a81289ddf3983d3ed177993dce47ccb150f2fcc287428d53000a220120010101022848010110b3b5e79df7c963efb443120853eb1bf9377e78020993bf79d5aaa9b02d1a6a000822012001030104284801016f610eec3a1e4dc9bdacbda0e586e7a8f6b4734b6599ecc0f8c5d0e9666d0ed3000822012001050106284801019100c451439a1cfdcf444d77bc78d03f19ca5e71b1f8fdae5e9e0ccf3e8214a000072201200107010828480101e0140ab9f7e276e1143af00713243e470dfc2c93c02b124622926fd33551a71b00062201200109010a284801014ef684da255649795b7830d1100f419d8f8a0eeb9ed6ed3610ba20b5d815deed0003220120010b010c2848010155cdb8f72801ef11ba562172ed2626c88208eddcf4a0c8f6d5447a785d02b790000202037820010d010e28480101b6eb72df89b91190ab85640f1ef9817bf00e49c5c11e8fd173b5b382ca4a104700010073dde8c69d27d40000000002e7da88000004baec525bbe000096f6a2fa0e38c69d27d4000000001598a6b2000004da46f3846200009e49d38f1eeb00afbbdc4b61f8041625a15bee3b094ff72034e12e69e8d71521ac6748fe6359832319afc8c8000000000000066800000033ffaa8b0000000419c4a9d68319afccd800000000000003d00000002e72705fc80000026a49c0209c284801018dfe3c99df194f8fec2b5b64b5ef08296b853794a29497c7c425ca62a44695e6000c2201200111011222012001130114284801010c275d6749b7c9102256e4abafdecda16a1697f20d26cd0e711bd9d58ef4a2f2000a28480101c169f7745c95d5f3f6b4e550c15978aaf563631f3a9e6ddaaa361ed4042e4f05000922012001150116284801017a3b4493fefcfd2275fa2f6ab01a8db5d70a443fb48dfb00e152545adfb97cbb0007220120011701182201200119011a28480101b13de2fa76c60764833d05264a2e1081609e2dfa9a04bf8d6c5e1162ac7d
47cb000728480101027742b12159d2d1310044b4a94e1eea928905b045871a52b552b4b4841288400006220120011b011c28480101df4611dc79f46dc700809e0c3140796be5ca9572c3f3fab70ddbe6a5460bf4900003220120011d011e28480101d52b65c44fcc1a90bbbf8cc01e8ab9c7b6c51f95c2735d6de72a669c1135a8360003020162011f01200201200121012200b0bc885a77c249fb95f38bb13224853b7944942c4b10b84f1b99aa32892aabd4f46335f78a00000000000000bc00000003fb0146f100000074c1cd03f56335e64b0000000000000058000000040ca9eccb000000387d19d10000afbc60807ffe2b018ea1eb65ddc523f7772fc1e1f16e73ee8905b4b66c12275ca800000000000000000000000000000000000000000000000000000000c67fd5260000000000000096000000081b2d1d7000000064c8dda77100afbc799607d471065ad26b0a721946ed764b15a01cf341e11989f08863962a362800000000000000000000000000000000000000000000000000000000c69d27d4000000000000003e00000002165b2b1c0000002c0b7cc7ad0103802001270001021101918f8df47d89a592d9a8e2220276e210d49d789c174ab2b303917d71c6655837000782012f0317cca5687735940043b9aca00401280129012a0247a00f076afb8843d0d2618df1779691876f9ffd59f9b30f47df60f49496744dec67200610012e013b0103d040012b003fb000000000400000000000000021dcd650010ee6b280087735940043b9aca004010150012d01db500e3a26680b9f6a280000e9b3bd478a000000e9b3bd478a0d73fd8f873243316a5b55a0395be4d1c584d71a7c52116b6379a3e93f9649360375de02eb9865b5786e167c4411e5c415cf6064be75fe04b0f81f0c4b84c7260880002c7d7c00000000000000000b9f6a0b1a749f2a012c001343b9aca0021dcd650020020161012e013b0106460600013f020340400130013102037604013201330297bf955555555555555555555555555555555555555555555555555555555555555502aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaad00000074d9dee0ce0c1014c014e0397beb33333333333333333333333333333333333333333333333333333333333333029999999999999999999999999999999999999999999999999999999999999999cf8000074d9dee0ce00400134013501360397be8517c7bdf5187c55af4f8b61fdc321588c7ab768dee24b006df29106458d7cf029a28be3defa8c3e2ad7a7c5b0fee190ac463d5bb46f71258036f9488322c6be7cf8000074d9dee0ce004001410142014301035040013701034040013b0082722f7566ede0ba3a33
3ac2ca4e9820a0eb28fa3c675e8c5b7378fbba7d487af6b6d8b330226ee7a4226c9a4e28167203a4dec229d3f51655422b56dd7122352fda03af7333333333333333333333333333333333333333333333333333333333333333300001d3677b8338199ff4756d3363cbb8d2ef1acd9bcdb85ff220a0116f74843bac2e4c0c79ceed000001d3677a8f142634e93ea0001408014d013801390082722f7566ede0ba3a333ac2ca4e9820a0eb28fa3c675e8c5b7378fbba7d487af6b69e73e012c2b93293818802ecda692b6a70c7bc140c3d6ba22159dd15949099300205203024013a015100a0431b9004c4b4000000000000000000960000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003af7333333333333333333333333333333333333333333333333333333333333333300001d3677b8338232a2e0d714f1820922c85912266b270b551f97fd88c9ce96b02fbe0b94fedd3300001d3677b83381634e93ea0001408013c013d013e0101a0013f0082729e73e012c2b93293818802ecda692b6a70c7bc140c3d6ba22159dd1594909930d8b330226ee7a4226c9a4e28167203a4dec229d3f51655422b56dd7122352fda020f0409283baec018110140015100ab69fe00000000000000000000000000000000000000000000000000000000000000013fccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccd283baec000000003a6cef706700c69d27d440009e42614c107ac0000000000000000064000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000103504001440103504001470082723145f857768776495406acbcd9f6451e43b82a7cf2b787bdfcd66f54e8f61eb2f5fc1aa51cd06879f30dac067b3d17d0571b7c8ef15db6c57ce3e90f63e7d80803af734517c7bdf5187c55af4f8b61fdc321588c7ab768dee24b006df29106458d7cf00001d3677b833817a340a8997502684b5be64a7ba6e4f4f1393c8316bcf1149956bbf00b95dd25600001d3677a8f143634e93ea0001408014d014501460082723145f857768776495406acbcd9f6451e43b82a7cf2b787bdfcd66f54e8f61eb20d9c166ab6df5f0d47d18e86fc45c1e5f42681c1184337189ef2e4aa3f2552c10205203034014a014b03af734517c7bdf5187c55af4f8b61fdc321588c7ab768dee24b006df29106458d7cf00001d3677b83383a4dbec8658831b756fd060883f7d013972d9838f66cebcd2e28d66f2b2d6d46900001d3677b83381634e93ea000
1408014d014801490082720d9c166ab6df5f0d47d18e86fc45c1e5f42681c1184337189ef2e4aa3f2552c1f5fc1aa51cd06879f30dac067b3d17d0571b7c8ef15db6c57ce3e90f63e7d8080205303034014a014b00a042665004c4b400000000000000000030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069600000009600000004000600000000000519ae84f17b8f8b22026a975ff55f1ab19fde4a768744d2178dfa63bb533e107a409026bc03af7555555555555555555555555555555555555555555555555555555555555555500001d3677b83383f9b2f37bf03c07595e4f861c14a0779c5b0ddb12bd6b0bf59b7f464fab4b279400001d3677a8f143634e93ea0001408014d014e014f0001200082720ac47779e474df79ac188caf2308fa7fccf511a8be789a6502f15ca63fba64408669008ce4710e1108a5eee86c282b1d13feaf7634e0c592943ae844ddd4ca0c02053030240150015100a041297004c4b40000000000000000002e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005bc00000000000000000000000012d452da449e50b8cf7dd27861f146122afe1b546bb8b70fc8216f0c614139f8e04d7cef969") + c, _ := cell.FromBOC(boc) + + var block tlb.Block + if err := tlb.LoadFromCell(&block, c.BeginParse()); err != nil { + t.Fatal(err) + } + + parents, err := GetParentBlocks(&block.BlockInfo) + if err != nil { + t.Fatal(err) + } + + println(len(parents)) +} + +func TestBlockNotMaster(t *testing.T) { + boc, _ := 
hex.DecodeString("b5ee9c72e1021c0100040b00001c00c400de0170020402a0033c036a037c0387039e03b6041c048204ce04ea0536055405a005ec060406200700077007bc080908100817041011ef55aaffffff110102030402a09bc7a98700000000840101c745200000000100000000000000000000000000634e94ec00001d367caaae4000001d367caaae419bbc68ac00058fb00173ed920173bfbec400000003000000000000002e05060211b8e48dfb43b9aca00407080a8a04250ec78adc9d082383679c3289edc662b628be0e34e51a8f7c412e98d24c8a5fb59960f376a6ad4dce93f406ce904add5a2aea140c99b877d02f67f1cd1e5f51021902190c0d03894a33f6fdb1c342502d7261843b4a3bfdbfb766c45705b7c4410af03c358431620ff05a79b1be0d76ede085c08726e04bad3c5779d949364eb56540f06c2c49b98d514111401a1b1b009800001d367c9b6c040173ed92b57df82537164b18661e22f620e1a7a15826a73d7402eef9433d55c030232370a7caa150ac8f2f4c74cb5c77e6671edb6f8accd65c683faf6e48a88720b2c72d009800001d367c9b6c0101c7451f78d2820caf6a5f100a444450ddab2f7754bbce7c6027dce5349269227866124a33b3efd318a7ec75c8f26844fd4dce5f581927f670a0087d7fec56658b487d720225826b977bb75290e16c135cbbddba94870b40080909000d0010ee6b2800080201200a0b0013be000003bc91627aea900013bfffffffbc8b96fc9c50235b9023afe2ffffff110000000000000000000000000001c7451f00000001634e94e900001d367c9b6c010173ed91200e0f10235b9023afe2ffffff110000000000000000000000000001c7452000000001634e94ec00001d367caaae410173ed9220141516284801017e49cb3c190a5033a93c907c6631d4459cf4bf71f57f041dd14270fb919423dc000122138209ae5deedd4a4385b011192848010125e39d851243cee82c062dd588cfa4587461b7869f68023bad26988d33bf8a24000223130104d72ef76ea521c2d81213192848010105a0d0f5cf8e9d2d98f032e935e8de2208463332de6c74af0b9d5cfc2bc2802102162848010157c418ac5021e527850e982354ed5a21fd7a0b0ac719e443fcd3c80f496dc4db003401110000000000000000501722138209ae5deedd4a4385b0181921d90000000000000000ffffffffffffffff826b977bb75290e16bb5f5e54ddd448c900001d367c9b6c040173ed92b57df82537164b18661e22f620e1a7a15826a73d7402eef9433d55c030232370a7caa150ac8f2f4c74cb5c77e6671edb6f8accd65c683faf6e48a88720b2c72d819006bb0400000000000000000b9f6c900000e9b3e4db601ffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0284801012aa19c773967de4112363f58e8331a68fb2b3fcb1d55daf352b93c497a019ce4021728480101b3e9649d10ccb379368e81a3a7e8e49c8eb53f6acc69b0ba2ffa80082f70ee39000100030020000102b1e6b8f1") + c, _ := cell.FromBOC(boc) + + var block tlb.Block + if err := tlb.LoadFromCell(&block, c.BeginParse()); err != nil { + t.Fatal(err) + } + + parents, err := GetParentBlocks(&block.BlockInfo) + if err != nil { + t.Fatal(err) + } + + println(len(parents)) +} diff --git a/ton/integration_test.go b/ton/integration_test.go index eba1bddb..b08756cd 100644 --- a/ton/integration_test.go +++ b/ton/integration_test.go @@ -111,7 +111,7 @@ func TestAPIClient_GetBlockData(t *testing.T) { t.Fatal("Get shard block data err:", err.Error()) return } - _, err = data.BlockInfo.GetParentBlocks() + _, err = GetParentBlocks(&data.BlockInfo) if err != nil { t.Fatal("Get block parents err:", err.Error()) return @@ -121,18 +121,32 @@ func TestAPIClient_GetBlockData(t *testing.T) { // TODO: data check } -func TestAPIClient_GetOldBlockData(t *testing.T) { +// commented because public archival LS works too bad to test +/*func TestAPIClient_GetOldBlockData(t *testing.T) { client := liteclient.NewConnectionPool() - ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) - defer cancel() + var ok bool + for i := 0; i < 10; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 7*time.Second) + err := client.AddConnection(ctx, "135.181.177.59:53312", "aF91CuUHuuOv9rm2W5+O/4h38M3sRm40DtSdRxQhmtQ=") + cancel() + if err != nil { + log.Println("ERR TRY", i) + continue + } + ok = true + break + } - err := client.AddConnection(ctx, "135.181.177.59:53312", "aF91CuUHuuOv9rm2W5+O/4h38M3sRm40DtSdRxQhmtQ=") - if err != nil { - panic(err) + if !ok { + panic("connect to archive node failed") } + log.Println("CONNECTED") + ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) + defer cancel() + api := NewAPIClient(client) b, err := 
api.CurrentMasterchainInfo(ctx) @@ -161,7 +175,7 @@ func TestAPIClient_GetOldBlockData(t *testing.T) { t.Fatal("Get shard block data err:", err.Error()) return } - _, err = data.BlockInfo.GetParentBlocks() + _, err = GetParentBlocks(&data.BlockInfo) if err != nil { t.Fatal("Get block parents err:", err.Error()) return @@ -176,7 +190,7 @@ func TestAPIClient_GetOldBlockData(t *testing.T) { } // TODO: data check -} +}*/ func Test_RunMethod(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) @@ -258,7 +272,7 @@ func Test_Account(t *testing.T) { return } - addr := address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N") + addr := address.MustParseAddr("EQCxE6mUtQJKFnGfaROTKOt1lZbDiiX1kCixRv7Nw2Id_sDs") res, err := api.WaitForBlock(b.SeqNo).GetAccount(ctx, b, addr) if err != nil { t.Fatal("get account err:", err.Error()) @@ -277,7 +291,7 @@ func Test_Account(t *testing.T) { fmt.Printf("Data: %s\n", res.Data.Dump()) } } else { - t.Fatal("TF account not active") + t.Fatal("account not active") } // take last tx info from account info diff --git a/ton/transactions.go b/ton/transactions.go index 5c965950..f2ccd323 100644 --- a/ton/transactions.go +++ b/ton/transactions.go @@ -362,8 +362,6 @@ func (c *APIClient) findLastTransactionByHash(ctx context.Context, addr *address return transaction, nil } } - - continue } else { if transaction.IO.In == nil { continue @@ -378,8 +376,6 @@ func (c *APIClient) findLastTransactionByHash(ctx context.Context, addr *address return transaction, nil } } - - return transaction, nil } scanned += 15 diff --git a/ton/wallet/integration_test.go b/ton/wallet/integration_test.go index 7d629e03..a81d1b4a 100644 --- a/ton/wallet/integration_test.go +++ b/ton/wallet/integration_test.go @@ -358,13 +358,13 @@ func TestWallet_DeployContractUsingHW3Masterchain(t *testing.T) { } // init wallet - w, err := FromPrivateKeyWithOptions(api, key, ConfigHighloadV3{ + w, err := FromPrivateKeyWithOptions(key, 
ConfigHighloadV3{ MessageTTL: 120, MessageBuilder: func(ctx context.Context, subWalletId uint32) (id uint32, createdAt int64, err error) { tm := time.Now().Unix() - 30 return uint32(10000 + tm%(1<<23)), tm, nil }, - }, WithWorkchain(-1)) + }, WithWorkchain(-1), WithAPI(api)) if err != nil { t.Fatal("FromSeed err:", err.Error()) } diff --git a/ton/wallet/seed.go b/ton/wallet/seed.go index 4323e953..39991328 100644 --- a/ton/wallet/seed.go +++ b/ton/wallet/seed.go @@ -7,10 +7,11 @@ import ( "crypto/sha512" "errors" "fmt" - "github.com/xssnick/tonutils-go/ton/wallet/hdwallet" "math/big" "strings" + "github.com/xssnick/tonutils-go/ton/wallet/hdwallet" + "golang.org/x/crypto/pbkdf2" ) @@ -20,8 +21,66 @@ const ( _BasicSalt = "TON seed version" _PasswordSalt = "TON fast seed version" _Path = "m/44'/607'/0'" + _LedgerPath = "m/44'/607'/0'/0'/0'/0'" ) +// SeedOption represents a functional option for seed operations. +type SeedOption func(*seedConfig) + +// seedConfig holds the configuration for seed operations. +type seedConfig struct { + path string + isBIP39 bool + password string +} + +// defaultSeedConfig returns the default configuration +func defaultSeedConfig() *seedConfig { + return &seedConfig{ + path: _Path, + isBIP39: false, + password: "", + } +} + +// WithLedger configures the seed to use BIP 39 and a Ledger compatible path. +func WithLedger() SeedOption { + return func(c *seedConfig) { + c.isBIP39 = true + c.path = _LedgerPath + } +} + +// WithCustomPath configures the seed to use a custom derivation path. +func WithCustomPath(path string) SeedOption { + return func(c *seedConfig) { + c.path = path + } +} + +// WithPassword configures the seed to use a specific password. +func WithPassword(password string) SeedOption { + return func(c *seedConfig) { + c.password = password + } +} + +// WithBIP39 enables BIP39 compatibility. 
+func WithBIP39(enabled bool) SeedOption { + return func(c *seedConfig) { + c.isBIP39 = enabled + } +} + +// applySeedOptions applies the functional options to the configuration. +func applySeedOptions(opts ...SeedOption) *seedConfig { + config := defaultSeedConfig() + for _, opt := range opts { + opt(config) + } + return config +} + func NewSeed() []string { return NewSeedWithPassword("") } @@ -63,12 +122,19 @@ func NewSeedWithPassword(password string) []string { type VersionConfig any +// Deprecated: Use FromSeedWithOptions instead. +// +// Example: FromSeedWithOptions(api, seed, version) func FromSeed(api TonAPI, seed []string, version VersionConfig, isBIP39 ...bool) (*Wallet, error) { return FromSeedWithPassword(api, seed, "", version, isBIP39...) } +// Decprecated: Use FromSeedWithOptions instead. +// +// Example: FromSeedWithOptions(api, seed, version, WithPassword("")) func FromSeedWithPassword(api TonAPI, seed []string, password string, version VersionConfig, isBIP39 ...bool) (*Wallet, error) { - k, err := SeedToPrivateKey(seed, password, len(isBIP39) > 0 && isBIP39[0]) + useBIP39 := len(isBIP39) > 0 && isBIP39[0] + k, err := SeedToPrivateKeyWithOptions(seed, WithPassword(password), WithBIP39(useBIP39)) if err != nil { return nil, err } @@ -76,10 +142,29 @@ func FromSeedWithPassword(api TonAPI, seed []string, password string, version Ve return FromPrivateKey(api, k, version) } -// SeedToPrivateKey convert seed to private key, -// by default ton seeds are not compatible with bip39, -// but you can enable compatibility with isBIP39 = true +// Deprecated: Use SeedToPrivateKeyWithOptions instead. 
+// +// Examples: +// - SeedToPrivateKeyWithOptions(seed) +// - SeedToPrivateKeyWithOptions(seed, WithPassword("")) +// - SeedToPrivateKeyWithOptions(seed, WithBIP39(true)) func SeedToPrivateKey(seed []string, password string, isBIP39 bool) (ed25519.PrivateKey, error) { + return SeedToPrivateKeyWithOptions(seed, WithPassword(password), WithBIP39(isBIP39)) +} + +// FromSeedWithOptions creates a wallet from seed with functional options. +func FromSeedWithOptions(api TonAPI, seed []string, version VersionConfig, opts ...SeedOption) (*Wallet, error) { + k, err := SeedToPrivateKeyWithOptions(seed, opts...) + if err != nil { + return nil, err + } + + return FromPrivateKey(api, k, version) +} + +// SeedToPrivateKeyWithOptions converts seed to a private key with functional options. +// Functional options can be used to set different options. +func SeedToPrivateKeyWithOptions(seed []string, opts ...SeedOption) (ed25519.PrivateKey, error) { if len(seed) < 12 { return nil, fmt.Errorf("seed should have at least 12 words") } @@ -90,17 +175,19 @@ func SeedToPrivateKey(seed []string, password string, isBIP39 bool) (ed25519.Pri } } + config := applySeedOptions(opts...) 
+ seedBytes := []byte(strings.Join(seed, " ")) mac := hmac.New(sha512.New, seedBytes) - mac.Write([]byte(password)) + mac.Write([]byte(config.password)) hash := mac.Sum(nil) - if len(password) > 0 { + if len(config.password) > 0 { p := pbkdf2.Key(hash, []byte(_PasswordSalt), 1, 1, sha512.New) if p[0] != 1 { - if isBIP39 { - pKey := pbkdf2.Key(seedBytes, []byte("mnemonic"+password), 2048, 64, sha512.New) - dk, err := hdwallet.Derived(_Path, pKey) + if config.isBIP39 { + pKey := pbkdf2.Key(seedBytes, []byte("mnemonic"+config.password), 2048, 64, sha512.New) + dk, err := hdwallet.Derived(config.path, pKey) if err != nil { return nil, err } @@ -111,9 +198,9 @@ func SeedToPrivateKey(seed []string, password string, isBIP39 bool) (ed25519.Pri } else { p := pbkdf2.Key(hash, []byte(_BasicSalt), _Iterations/256, 1, sha512.New) if p[0] != 0 { - if isBIP39 { + if config.isBIP39 { pKey := pbkdf2.Key(seedBytes, []byte("mnemonic"), 2048, 64, sha512.New) - dk, err := hdwallet.Derived(_Path, pKey) + dk, err := hdwallet.Derived(config.path, pKey) if err != nil { return nil, err } diff --git a/ton/wallet/seed_test.go b/ton/wallet/seed_test.go index 55cc3212..669e451d 100644 --- a/ton/wallet/seed_test.go +++ b/ton/wallet/seed_test.go @@ -61,3 +61,102 @@ func TestBIP39Load(t *testing.T) { t.Fatal("wrong address", w.WalletAddress()) } } + +func TestLedgerCompatibleSeedLoad(t *testing.T) { + seed := strings.Split("prison fuel story response target drill domain fitness heavy mixed meat lend father kiwi before elite exile fee swing make alcohol journey volcano tobacco", " ") + + testCases := []struct { + name string + version VersionConfig + expectedAddress string + expectError bool + }{ + { + name: "V3", + version: V3, + expectedAddress: "UQAnnVwSCsdM-Tukh4qxzSySbtts9HP3tOgR1oQ_bR9wTy39", + expectError: false, + }, + { + name: "V3R1", + version: V3R1, + expectedAddress: "UQADgAfAIcLrtL9V9EpIVhRyLtwzVq324g-PFKa4JMFPOGfP", + expectError: false, + }, + { + name: "V3R2", + version: V3R2, + 
expectedAddress: "UQAnnVwSCsdM-Tukh4qxzSySbtts9HP3tOgR1oQ_bR9wTy39", + expectError: false, + }, + { + name: "V4R1", + version: V4R1, + expectedAddress: "UQDK_RYsjuYzz88Oh0y7sVlwTGPU7P6RXi0Z2eiH0jr4mtJc", + expectError: false, + }, + { + name: "V4R2", + version: V4R2, + expectedAddress: "UQDNrm1gX7-Vn3_dF-CsUcBqxKG-xqnGqEtHv2opLn9kso_F", + expectError: false, + }, + { + name: "V5R1Beta testnet", + version: ConfigV5R1Beta{ + NetworkGlobalID: TestnetGlobalID, + Workchain: 0, + }, + expectedAddress: "UQBm1gbcTyB-JUNmGMduP-BBWxmMr0C2zjTqMud099roBDLg", + expectError: false, + }, + { + name: "V5R1Beta mainnet", + version: ConfigV5R1Beta{ + NetworkGlobalID: MainnetGlobalID, + Workchain: 0, + }, + expectedAddress: "UQAd8wZRJXRH2v2csZv-qHvDQvcPsVzRdSH0oZ4WUskk7hXr", + expectError: false, + }, + { + name: "V5R1Final testnet", + version: ConfigV5R1Final{ + NetworkGlobalID: TestnetGlobalID, + Workchain: 0, + }, + expectedAddress: "UQCa2r5G2qvZkV0UobPVrAwaF1ykeETpJkUVbhtSOJdTd0ZV", + expectError: false, + }, + { + name: "V5R1Final mainnet", + version: ConfigV5R1Final{ + NetworkGlobalID: MainnetGlobalID, + Workchain: 0, + }, + expectedAddress: "UQA_IG06Eebapl2jBgH_UX64VG8wb0DDmEI9jWhKWBji220F", + expectError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + w, err := FromSeedWithOptions(nil, seed, tc.version, WithLedger()) + + if tc.expectError { + if err == nil { + t.Fatal("expected error but got none") + } + return + } + + if err != nil { + t.Fatal(err) + } + + if w.WalletAddress().String() != tc.expectedAddress { + t.Fatal("wrong address", w.WalletAddress()) + } + }) + } +} diff --git a/ton/wallet/v5r1.go b/ton/wallet/v5r1.go index a8467bca..5e07f05a 100644 --- a/ton/wallet/v5r1.go +++ b/ton/wallet/v5r1.go @@ -66,7 +66,7 @@ func (s *SpecV5R1Final) BuildMessage(ctx context.Context, _ bool, _ *ton.BlockID return nil, fmt.Errorf("failed to fetch seqno: %w", err) } - actions, err := packV5Actions(messages) + actions, err := 
PackV5OutActions(messages) if err != nil { return nil, fmt.Errorf("failed to build actions: %w", err) } @@ -108,8 +108,7 @@ func validateMessageFields(messages []*Message) error { return nil } -// Pack Actions -func packV5Actions(messages []*Message) (*cell.Builder, error) { +func PackV5OutActions(messages []*Message) (*cell.Builder, error) { if err := validateMessageFields(messages); err != nil { return nil, err } diff --git a/ton/wallet/wallet.go b/ton/wallet/wallet.go index 8ffc7b01..7796e194 100644 --- a/ton/wallet/wallet.go +++ b/ton/wallet/wallet.go @@ -167,30 +167,35 @@ type Option func(*Wallet) func FromPrivateKey(api TonAPI, key ed25519.PrivateKey, version VersionConfig) (*Wallet, error) { return newWallet( - api, key.Public().(ed25519.PublicKey), version, - WithPrivateKey(key)) + WithPrivateKey(key), WithAPI(api)) } -// FromPrivateKeyWithOptions - can initialize customizable wallet, for example: FromPrivateKeyWithOptions(api, key, version, WithWorkchain(-1)) -func FromPrivateKeyWithOptions(api TonAPI, key ed25519.PrivateKey, version VersionConfig, options ...Option) (*Wallet, error) { +// FromPrivateKeyWithOptions - can initialize a customizable wallet, for example, FromPrivateKeyWithOptions(key, version, WithAPI(api), WithWorkchain(-1)) +func FromPrivateKeyWithOptions(key ed25519.PrivateKey, version VersionConfig, options ...Option) (*Wallet, error) { return newWallet( - api, key.Public().(ed25519.PublicKey), version, append([]Option{WithPrivateKey(key)}, options...)...) 
} +// Deprecated: use FromPubKeyWithOptions(publicKey, version, WithSigner(signer)) func FromSigner(api TonAPI, publicKey ed25519.PublicKey, version VersionConfig, signer Signer) (*Wallet, error) { return newWallet( - api, publicKey, version, - WithSigner(signer)) + WithSigner(signer), WithAPI(api)) } -func newWallet(api TonAPI, publicKey ed25519.PublicKey, version VersionConfig, options ...Option) (*Wallet, error) { +func FromPubKeyWithOptions(publicKey ed25519.PublicKey, version VersionConfig, options ...Option) (*Wallet, error) { + return newWallet( + publicKey, + version, + options...) +} + +func newWallet(publicKey ed25519.PublicKey, version VersionConfig, options ...Option) (*Wallet, error) { var subwallet uint32 = DefaultSubwallet // default subwallet depends on wallet type @@ -206,7 +211,6 @@ func newWallet(api TonAPI, publicKey ed25519.PublicKey, version VersionConfig, o } w := &Wallet{ - api: api, addr: addr, ver: version, subwallet: subwallet, @@ -243,6 +247,12 @@ func WithSigner(signer Signer) Option { } } +func WithAPI(api TonAPI) Option { + return func(w *Wallet) { + w.api = api + } +} + func WithWorkchain(wc int8) Option { return func(w *Wallet) { w.addr = address.NewAddress(w.addr.FlagsToByte(), byte(wc), w.addr.Data()) @@ -315,8 +325,7 @@ func getSpec(w *Wallet) (any, error) { } // Address - returns old (bounce) version of wallet address -// DEPRECATED: because of address reform, use WalletAddress, -// it will return UQ format +// Deprecated: because of address reform, use WalletAddress, it will return UQ format func (w *Wallet) Address() *address.Address { return w.addr } diff --git a/toncenter/client-v2.go b/toncenter/client-v2.go new file mode 100644 index 00000000..41131cda --- /dev/null +++ b/toncenter/client-v2.go @@ -0,0 +1,686 @@ +package toncenter + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "github.com/xssnick/tonutils-go/address" + "github.com/xssnick/tonutils-go/tvm/cell" + "math/big" + "net/url" + 
"strconv" + "strings" +) + +type V2 struct { + client *Client +} + +func (c *Client) V2() *V2 { + return &V2{client: c} +} + +func (v *V2) apiBase() string { + return strings.TrimRight(v.client.baseURL, "/") + "/api/v2" +} + +type AddressInformationV2Result struct { + Balance NanoCoins `json:"balance"` + Code *cell.Cell `json:"code"` + Data *cell.Cell `json:"data"` + LastTransactionID *TransactionID `json:"last_transaction_id"` + State string `json:"state"` // "active", "uninitialized", "frozen" +} + +// GetAddressInformation /getAddressInformation +func (v *V2) GetAddressInformation(ctx context.Context, addr *address.Address) (*AddressInformationV2Result, error) { + q := url.Values{"address": []string{addr.String()}} + return V2GetCall[AddressInformationV2Result](ctx, v, "getAddressInformation", q) +} + +type ExtraCurrencyV2 struct { + Currency int64 `json:"id"` + Balance NanoCoins `json:"amount"` +} + +type ExtendedAddressInformationV2 struct { + Type string `json:"@type"` + Balance NanoCoins `json:"balance"` + ExtraCurrencies []ExtraCurrencyV2 `json:"extra_currencies"` + LastTransactionId TransactionID `json:"last_transaction_id"` + BlockId BlockID `json:"block_id"` + SyncUtime int64 `json:"sync_utime"` + AccountState map[string]any `json:"account_state"` + Revision int `json:"revision"` +} + +// GetExtendedAddressInformation /getExtendedAddressInformation +func (v *V2) GetExtendedAddressInformation(ctx context.Context, addr *address.Address) (*ExtendedAddressInformationV2, error) { + q := url.Values{"address": []string{addr.String()}} + return V2GetCall[ExtendedAddressInformationV2](ctx, v, "getExtendedAddressInformation", q) +} + +type WalletInformationV2Result struct { + IsWallet bool `json:"wallet"` + Balance NanoCoins `json:"balance"` + AccountState string `json:"account_state"` + WalletType string `json:"wallet_type"` + Seqno uint64 `json:"seqno"` + LastTransactionID *TransactionID `json:"last_transaction_id"` + WalletId int64 `json:"wallet_id"` +} + +// 
GetWalletInformation /getWalletInformation +func (v *V2) GetWalletInformation(ctx context.Context, addr *address.Address) (*WalletInformationV2Result, error) { + q := url.Values{"address": []string{addr.String()}} + return V2GetCall[WalletInformationV2Result](ctx, v, "getWalletInformation", q) +} + +type MasterchainInfoV2Result struct { + Last BlockID `json:"last"` + StateRootHash []byte `json:"state_root_hash"` + Init BlockID `json:"init"` +} + +func (v *V2) GetMasterchainInfo(ctx context.Context) (*MasterchainInfoV2Result, error) { + return V2GetCall[MasterchainInfoV2Result](ctx, v, "getMasterchainInfo", nil) +} + +type MasterchainBlockSignaturesV2Result struct { + ID BlockID `json:"id"` + Signatures []struct { + NodeID []byte `json:"node_id"` + Signature []byte `json:"signature"` + Validator []byte `json:"validator"` + } `json:"signatures"` +} + +func (v *V2) GetMasterchainBlockSignatures(ctx context.Context, seqno uint64) (*MasterchainBlockSignaturesV2Result, error) { + q := url.Values{"seqno": []string{strconv.FormatUint(seqno, 10)}} + return V2GetCall[MasterchainBlockSignaturesV2Result](ctx, v, "getMasterchainBlockSignatures", q) +} + +type shardsV2Result struct { + Shards []BlockID `json:"shards"` +} + +func (v *V2) GetShards(ctx context.Context, masterSeqno uint64) ([]BlockID, error) { + q := url.Values{"seqno": []string{strconv.FormatUint(masterSeqno, 10)}} + blocks, err := V2GetCall[shardsV2Result](ctx, v, "shards", q) + if err != nil { + return nil, err + } + + return blocks.Shards, nil +} + +type ShardProofLinkV2 struct { + ID BlockID `json:"id"` + Proof *cell.Cell `json:"proof"` +} + +type McProofV2 struct { + ToKeyBlock bool `json:"to_key_block"` + + From BlockID `json:"from"` + To BlockID `json:"to"` + Proof *cell.Cell `json:"proof"` + DestProof *cell.Cell `json:"dest_proof"` + StateProof *cell.Cell `json:"state_proof"` +} + +type ShardBlockProofV2 struct { + From BlockID `json:"from"` + MasterID BlockID `json:"mc_id"` + Links []ShardProofLinkV2 
`json:"links"` + McProof []McProofV2 `json:"mc_proof"` +} + +func (v *V2) GetShardBlockProof(ctx context.Context, workchain int32, shard int64, seqno uint64, fromSeqno *uint64) (*ShardBlockProofV2, error) { + q := url.Values{ + "workchain": []string{strconv.FormatInt(int64(workchain), 10)}, + "shard": []string{strconv.FormatInt(shard, 10)}, + "seqno": []string{strconv.FormatUint(seqno, 10)}, + } + if fromSeqno != nil { + q.Set("from_seqno", strconv.FormatUint(*fromSeqno, 10)) + } + + return V2GetCall[ShardBlockProofV2](ctx, v, "getShardBlockProof", q) +} + +func (v *V2) GetAddressBalance(ctx context.Context, addr *address.Address) (*NanoCoins, error) { + q := url.Values{"address": []string{addr.String()}} + return V2GetCall[NanoCoins](ctx, v, "getAddressBalance", q) +} + +func (v *V2) GetAddressState(ctx context.Context, addr *address.Address) (string, error) { + q := url.Values{"address": []string{addr.String()}} + res, err := V2GetCall[string](ctx, v, v.apiBase()+"/getAddressState", q) + if err != nil { + return "", err + } + + return *res, nil +} + +type GetTransactionsV2Opts struct { + Limit *int + Lt *int64 + Hash *string + ToLT *int64 + Archival *bool +} + +type MessageV2 struct { + Source *AddrInfoV2 `json:"source"` + Destination *AddrInfoV2 `json:"destination"` + Value NanoCoins `json:"value"` + FwdFee NanoCoins `json:"fwd_fee"` + IhrFee NanoCoins `json:"ihr_fee"` + CreatedLt uint64 `json:"created_lt,string"` + Body *cell.Cell `json:"body"` + Message []byte `json:"message"` +} + +type TransactionV2 struct { + TransactionID TransactionID `json:"transaction_id"` + UnixTime int64 `json:"utime"` + Data *cell.Cell `json:"data"` + + Fee NanoCoins `json:"fee"` + StorageFee NanoCoins `json:"storage_fee"` + OtherFee NanoCoins `json:"other_fee"` + + InMsg *MessageV2 `json:"in_msg"` + OutMessages []MessageV2 `json:"out_msgs"` + + BlockID BlockID `json:"block_id"` + Account string `json:"account"` + Aborted bool `json:"aborted"` + Destroyed bool `json:"destroyed"` +} + 
+func (v *V2) GetTransactions(ctx context.Context, addr *address.Address, opt *GetTransactionsV2Opts) ([]TransactionV2, error) { + q := url.Values{"address": []string{addr.String()}} + if opt != nil { + if opt.Limit != nil { + q.Set("limit", strconv.Itoa(*opt.Limit)) + } + if opt.Lt != nil { + q.Set("lt", strconv.FormatInt(*opt.Lt, 10)) + } + if opt.Hash != nil { + q.Set("hash", *opt.Hash) + } + if opt.ToLT != nil { + q.Set("to_lt", strconv.FormatInt(*opt.ToLT, 10)) + } + if opt.Archival != nil { + q.Set("archival", strconv.FormatBool(*opt.Archival)) + } + } + + res, err := V2GetCall[[]TransactionV2](ctx, v, "getTransactions", q) + if err != nil { + return nil, err + } + + return *res, nil +} + +type ConsensusBlockV2Result struct { + Seqno uint64 `json:"consensus_block"` + Timestamp float64 `json:"timestamp"` +} + +func (v *V2) GetConsensusBlock(ctx context.Context) (*ConsensusBlockV2Result, error) { + return V2GetCall[ConsensusBlockV2Result](ctx, v, "getConsensusBlock", nil) +} + +type LookupBlockV2Options struct { + Seqno *uint64 + LT *uint64 + UTime *int64 + Exact *bool +} + +func (v *V2) LookupBlock(ctx context.Context, workchain int32, shard int64, opts *LookupBlockV2Options) (*BlockID, error) { + if opts == nil { + return nil, errors.New("at least one option should be specified") + } + + q := url.Values{ + "workchain": []string{strconv.FormatInt(int64(workchain), 10)}, + "shard": []string{strconv.FormatInt(shard, 10)}, + } + if opts.Seqno != nil { + q.Set("seqno", strconv.FormatUint(*opts.Seqno, 10)) + } + if opts.LT != nil { + q.Set("lt", strconv.FormatUint(*opts.LT, 10)) + } + if opts.UTime != nil { + q.Set("unixtime", strconv.FormatInt(*opts.UTime, 10)) + } + return V2GetCall[BlockID](ctx, v, "lookupBlock", q) +} + +type BlockTxShort struct { + Mode int `json:"mode"` + Account string `json:"account"` + Hash []byte `json:"hash"` + LT uint64 `json:"lt,string"` +} + +type BlockTransactionsV2Result struct { + ID BlockID `json:"id"` + Transactions 
[]BlockTxShort `json:"transactions"` + Incomplete bool `json:"incomplete"` +} + +type GetBlockTransactionsV2Options struct { + RootHash []byte + FileHash []byte + AfterLt *uint64 + AfterHash []byte + Count *int +} + +func (v *V2) GetBlockTransactions(ctx context.Context, workchain int32, shard int64, seqno uint64, opts *GetBlockTransactionsV2Options) (*BlockTransactionsV2Result, error) { + q := url.Values{ + "workchain": []string{strconv.FormatInt(int64(workchain), 10)}, + "shard": []string{strconv.FormatInt(shard, 10)}, + "seqno": []string{strconv.FormatUint(seqno, 10)}, + } + if opts != nil && opts.RootHash != nil { + q.Set("root_hash", base64.URLEncoding.EncodeToString(opts.RootHash)) + } + if opts != nil && opts.FileHash != nil { + q.Set("file_hash", base64.URLEncoding.EncodeToString(opts.FileHash)) + } + if opts != nil && opts.AfterLt != nil { + q.Set("after_lt", strconv.FormatUint(*opts.AfterLt, 10)) + } + if opts != nil && opts.AfterHash != nil { + q.Set("after_hash", base64.URLEncoding.EncodeToString(opts.AfterHash)) + } + if opts != nil && opts.Count != nil { + q.Set("count", strconv.Itoa(*opts.Count)) + } + return V2GetCall[BlockTransactionsV2Result](ctx, v, "getBlockTransactions", q) +} + +type AddrInfoV2 struct { + Addr *address.Address +} + +func (a *AddrInfoV2) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return fmt.Errorf("empty value") + } + + if b[0] == '"' && b[len(b)-1] == '"' { + if len(b) == 2 { + a.Addr = nil + return nil + } + + addr, err := address.ParseAddr(string(b[1 : len(b)-1])) + if err != nil { + return err + } + a.Addr = addr + return nil + } + + var tpd addrInfoTyped + if err := json.Unmarshal(b, &tpd); err != nil { + return fmt.Errorf("failed to decode addrInfoTyped: %w (%s)", err, string(b)) + } + + if tpd.Type != "accountAddress" { + return fmt.Errorf("unexpected type: %s", tpd.Type) + } + + if tpd.AccountAddress != "" { + addr, err := address.ParseAddr(tpd.AccountAddress) + if err != nil { + return err + } + a.Addr = addr 
+ } + + return nil +} + +func (a *AddrInfoV2) MarshalJSON() ([]byte, error) { + tpd := addrInfoTyped{ + Type: "accountAddress", + } + if a.Addr != nil { + tpd.AccountAddress = a.Addr.String() + } + return json.Marshal(tpd) +} + +type addrInfoTyped struct { + Type string `json:"@type"` + AccountAddress string `json:"account_address"` +} + +type BlockTransactionsExtV2Result struct { + ID BlockID `json:"id"` + Transactions []TransactionV2 `json:"transactions"` + Incomplete bool `json:"incomplete"` +} + +func (v *V2) GetBlockTransactionsExt(ctx context.Context, workchain int32, shard int64, seqno uint64, opts *GetBlockTransactionsV2Options) (*BlockTransactionsExtV2Result, error) { + q := url.Values{ + "workchain": []string{strconv.FormatInt(int64(workchain), 10)}, + "shard": []string{strconv.FormatInt(shard, 10)}, + "seqno": []string{strconv.FormatUint(seqno, 10)}, + } + if opts != nil && opts.RootHash != nil { + q.Set("root_hash", base64.URLEncoding.EncodeToString(opts.RootHash)) + } + if opts != nil && opts.FileHash != nil { + q.Set("file_hash", base64.URLEncoding.EncodeToString(opts.FileHash)) + } + if opts != nil && opts.AfterLt != nil { + q.Set("after_lt", strconv.FormatUint(*opts.AfterLt, 10)) + } + if opts != nil && opts.AfterHash != nil { + q.Set("after_hash", base64.URLEncoding.EncodeToString(opts.AfterHash)) + } + if opts != nil && opts.Count != nil { + q.Set("count", strconv.Itoa(*opts.Count)) + } + return V2GetCall[BlockTransactionsExtV2Result](ctx, v, "getBlockTransactionsExt", q) +} + +type GetBlockHeaderOptions struct { + RootHash []byte + FileHash []byte +} + +type BlockHeaderV2Result struct { + ID BlockID `json:"id"` + GlobalId int32 `json:"global_id"` + Version uint32 `json:"version"` + Flags uint32 `json:"flags"` + AfterMerge bool `json:"after_merge"` + AfterSplit bool `json:"after_split"` + BeforeSplit bool `json:"before_split"` + WantMerge bool `json:"want_merge"` + WantSplit bool `json:"want_split"` + ValidatorListHashShort int64 
`json:"validator_list_hash_short"` + CatchainSeqno uint64 `json:"catchain_seqno"` + MinRefMcSeqno uint64 `json:"min_ref_mc_seqno"` + IsKeyBlock bool `json:"is_key_block"` + PrevKeyBlockSeqno uint64 `json:"prev_key_block_seqno"` + StartLt uint64 `json:"start_lt,string"` + EndLt uint64 `json:"end_lt,string"` + GenUtime uint32 `json:"gen_utime"` + VertSeqno uint64 `json:"vert_seqno"` + PrevBlocks []BlockID `json:"prev_blocks"` +} + +func (v *V2) GetBlockHeader(ctx context.Context, workchain int32, shard int64, seqno uint64, opts *GetBlockHeaderOptions) (*BlockHeaderV2Result, error) { + q := url.Values{ + "workchain": []string{strconv.FormatInt(int64(workchain), 10)}, + "shard": []string{strconv.FormatInt(shard, 10)}, + "seqno": []string{strconv.FormatUint(seqno, 10)}, + } + if opts != nil { + if opts.RootHash != nil { + q.Set("root_hash", base64.URLEncoding.EncodeToString(opts.RootHash)) + } + if opts.FileHash != nil { + q.Set("file_hash", base64.URLEncoding.EncodeToString(opts.FileHash)) + } + } + return V2GetCall[BlockHeaderV2Result](ctx, v, "getBlockHeader", q) +} + +type configParamV2Result struct { + Value struct { + Param *cell.Cell `json:"bytes"` + } `json:"config"` +} + +func (v *V2) GetConfigParam(ctx context.Context, configID int64, seqno *int64) (*cell.Cell, error) { + q := url.Values{"config_id": []string{strconv.FormatInt(configID, 10)}} + if seqno != nil { + q.Set("seqno", strconv.FormatInt(*seqno, 10)) + } + res, err := V2GetCall[configParamV2Result](ctx, v, "getConfigParam", q) + if err != nil { + return nil, err + } + return res.Value.Param, nil +} + +func (v *V2) GetConfigAll(ctx context.Context, seqno *int64) (*cell.Cell, error) { + var q = url.Values{} + if seqno != nil { + q.Set("seqno", strconv.FormatInt(*seqno, 10)) + } + res, err := V2GetCall[configParamV2Result](ctx, v, "getConfigAll", q) + if err != nil { + return nil, err + } + return res.Value.Param, nil +} + +type OutMsgQueueSizeShardV2 struct { + ID BlockID `json:"id"` + Size uint64 
`json:"size"` +} + +type OutMsgQueueSizesV2Result struct { + Shards []OutMsgQueueSizeShardV2 `json:"shards"` + SizeLimit uint64 `json:"ext_msg_queue_size_limit"` +} + +func (v *V2) GetOutMsgQueueSizes(ctx context.Context) (*OutMsgQueueSizesV2Result, error) { + return V2GetCall[OutMsgQueueSizesV2Result](ctx, v, "getOutMsgQueueSizes", nil) +} + +type TokenDataV2Result struct { + TotalSupply *big.Int `json:"total_supply"` + Mintable bool `json:"mintable"` + AdminAddress *address.Address `json:"admin_address"` + JettonContent struct { + Type string `json:"type"` + Data map[string]any `json:"data"` + } `json:"jetton_content"` + JettonWalletCode *cell.Cell `json:"jetton_wallet_code"` + ContractType string `json:"contract_type"` +} + +func (v *V2) GetTokenData(ctx context.Context, addr *address.Address) (*TokenDataV2Result, error) { + q := url.Values{"address": []string{addr.String()}} + return V2GetCall[TokenDataV2Result](ctx, v, "getTokenData", q) +} + +// Deprecated: TonCenter advises to not use it, because it is not always finding transaction +func (v *V2) TryLocateTx(ctx context.Context, source, destination *address.Address, createdLT uint64) (*TransactionV2, error) { + q := url.Values{ + "source": []string{source.String()}, + "destination": []string{destination.String()}, + "created_lt": []string{strconv.FormatUint(createdLT, 10)}, + } + return V2GetCall[TransactionV2](ctx, v, "tryLocateTx", q) +} + +// Deprecated: TonCenter advises to not use it, because it is not always finding transaction +func (v *V2) TryLocateResultTx(ctx context.Context, source, destination *address.Address, createdLT uint64) (*TransactionV2, error) { + q := url.Values{ + "source": []string{source.String()}, + "destination": []string{destination.String()}, + "created_lt": []string{strconv.FormatUint(createdLT, 10)}, + } + return V2GetCall[TransactionV2](ctx, v, "tryLocateResultTx", q) +} + +// Deprecated: TonCenter advises to not use it, because it is not always finding transaction +func (v 
*V2) TryLocateSourceTx(ctx context.Context, source, destination *address.Address, createdLT uint64) (*TransactionV2, error) { + q := url.Values{ + "source": []string{source.String()}, + "destination": []string{destination.String()}, + "created_lt": []string{strconv.FormatUint(createdLT, 10)}, + } + return V2GetCall[TransactionV2](ctx, v, "tryLocateSourceTx", q) +} + +func (v *V2) SendBoc(ctx context.Context, data []byte) error { + _, err := V2PostCall[any](ctx, v, "sendBoc", map[string][]byte{ + "boc": data, + }) + return err +} + +type EstimateFeeRequest struct { + Address *address.Address `json:"address"` + Body *cell.Cell `json:"body"` + InitCode *cell.Cell `json:"init_code,omitempty"` + InitData *cell.Cell `json:"init_data,omitempty"` + IgnoreChkSig bool `json:"ignore_chksig"` +} + +type Fee struct { + InFwdFee uint64 `json:"in_fwd_fee"` + StorageFee uint64 `json:"storage_fee"` + GasFee uint64 `json:"gas_fee"` + FwdFee uint64 `json:"fwd_fee"` +} + +type EstimateFeeV2Result struct { + SourceFees Fee `json:"source_fees"` + DestinationFees []Fee `json:"destination_fees"` +} + +func (v *V2) EstimateFee(ctx context.Context, req EstimateFeeRequest) (*EstimateFeeV2Result, error) { + return V2PostCall[EstimateFeeV2Result](ctx, v, "estimateFee", req) +} + +type RunGetMethodV2Result struct { + GasUsed uint64 + Stack []any + ExitCode int + BlockId BlockID + LastTransactionId *TransactionID +} + +// Deprecated: Use RunGetMethod from V3 api +// +// RunGetMethod - Call contract method, stack elements could be *address.Address, *cell.Cell, *cell.Slice, and *big.Int +func (v *V2) RunGetMethod(ctx context.Context, addr *address.Address, method string, stack []any, masterSeqno *uint64) (*RunGetMethodV2Result, error) { + type runGetMethodRequest struct { + Address *address.Address `json:"address"` + Method string `json:"method,omitempty"` + + // Array of stack elements: *cell.Cell, *cell.Slice, and *big.Int supported + Stack [][]string `json:"stack"` + + MasterSeqno *uint64 
`json:"seqno,omitempty"` + } + + type runGetMethodResult struct { + GasUsed uint64 `json:"gas_used"` + Stack [][]any `json:"stack"` + ExitCode int `json:"exit_code"` + BlockId BlockID `json:"block_id"` + LastTransactionId *TransactionID `json:"last_transaction_id"` + } + + var stk = [][]string{} + for _, a := range stack { + switch val := a.(type) { + case *cell.Cell: + stk = append(stk, []string{"tvm.Cell", base64.StdEncoding.EncodeToString(val.ToBOC())}) + case *cell.Slice: + stk = append(stk, []string{"tvm.Slice", base64.StdEncoding.EncodeToString(val.MustToCell().ToBOC())}) + case *address.Address: + stk = append(stk, []string{"tvm.Slice", base64.StdEncoding.EncodeToString(cell.BeginCell().MustStoreAddr(val).EndCell().ToBOC())}) + case *big.Int: + if val == nil { + return nil, fmt.Errorf("nil big.Int") + } + stk = append(stk, []string{"num", "0x" + val.Text(16)}) + default: + return nil, fmt.Errorf("unsupported stack element type") + } + } + + res, err := V2PostCall[runGetMethodResult](ctx, v, "runGetMethod", runGetMethodRequest{ + Address: addr, + Method: method, + Stack: stk, + MasterSeqno: masterSeqno, + }) + if err != nil { + return nil, err + } + + stack, err = parseStackV2(res.Stack) + if err != nil { + return nil, fmt.Errorf("failed to parse stack: %w", err) + } + + return &RunGetMethodV2Result{ + GasUsed: res.GasUsed, + Stack: stack, + ExitCode: res.ExitCode, + BlockId: res.BlockId, + LastTransactionId: res.LastTransactionId, + }, nil +} + +func parseStackV2(stack [][]any) ([]any, error) { + var stk []any + for _, a := range stack { + if len(a) != 2 { + return nil, fmt.Errorf("incorrect stack element") + } + + name, ok := a[0].(string) + if !ok { + return nil, fmt.Errorf("incorrect stack element name type") + } + + val, ok := a[1].(string) + if !ok { + return nil, fmt.Errorf("result stack type '%s' is not supported for v2 api due to incompatible format, use LS or v3", name) + } + + switch name { + case "num": + if !strings.HasPrefix(val, "0x") { + 
return nil, fmt.Errorf("invalid number format") + } + val = val[2:] + + res, _ := new(big.Int).SetString(val, 16) + if res == nil { + return nil, fmt.Errorf("invalid number format") + } + + stk = append(stk, res) + default: + return nil, fmt.Errorf("unsupported stack element type") + } + } + return stk, nil +} + +func V2PostCall[T any](ctx context.Context, v *V2, method string, req any) (*T, error) { + return doPOST[T](ctx, v.client, v.apiBase()+"/"+method, req, false) +} + +func V2GetCall[T any](ctx context.Context, v *V2, method string, query url.Values) (*T, error) { + return doGET[T](ctx, v.client, v.apiBase()+"/"+method, query, false) +} diff --git a/toncenter/client-v3.go b/toncenter/client-v3.go new file mode 100644 index 00000000..0931c29c --- /dev/null +++ b/toncenter/client-v3.go @@ -0,0 +1,225 @@ +package toncenter + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/xssnick/tonutils-go/address" + "github.com/xssnick/tonutils-go/tvm/cell" + "math/big" + "net/url" + "strconv" + "strings" +) + +type V3 struct { + client *Client +} + +func (c *Client) V3() *V3 { + return &V3{client: c} +} + +func (v *V3) apiBase() string { + return strings.TrimRight(v.client.baseURL, "/") + "/api/v3" +} + +type stackElementV3 struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` +} + +// /api/v3/adjacentTransactions + +func (v *V3) EstimateFee(ctx context.Context, req EstimateFeeRequest) (*EstimateFeeV2Result, error) { + return V3PostCall[EstimateFeeV2Result](ctx, v, "estimateFee", req) +} + +type AddressInformationV3Result struct { + Balance NanoCoins `json:"balance"` + Code *cell.Cell `json:"code"` + Data *cell.Cell `json:"data"` + FrozenHash []byte `json:"frozen_hash"` + LastTxHash []byte `json:"last_transaction_hash"` + LastTxLT uint64 `json:"last_transaction_lt,string"` + Status string `json:"status"` // "active", "uninitialized", "frozen" +} + +// GetAddressInformation /getAddressInformation +func (v *V3) 
GetAddressInformation(ctx context.Context, addr *address.Address) (*AddressInformationV3Result, error) { + q := url.Values{"address": []string{addr.String()}} + return V3GetCall[AddressInformationV3Result](ctx, v, "addressInformation", q) +} + +type WalletInformationV3Result struct { + Balance NanoCoins `json:"balance"` + LastTxHash []byte `json:"last_transaction_hash"` + LastTxLT uint64 `json:"last_transaction_lt,string"` + Seqno uint64 `json:"seqno"` + Status string `json:"status"` + WalletId int64 `json:"wallet_id"` + WalletType string `json:"wallet_type"` +} + +// GetWalletInformation /getWalletInformation +func (v *V3) GetWalletInformation(ctx context.Context, addr *address.Address) (*WalletInformationV3Result, error) { + q := url.Values{"address": []string{addr.String()}} + return V3GetCall[WalletInformationV3Result](ctx, v, "walletInformation", q) +} + +type RunGetMethodV3Result struct { + GasUsed uint64 + Stack []any + ExitCode int + BlockId BlockID + LastTransactionId *TransactionID +} + +// RunGetMethod - Call contract method, stack elements could be *address.Address, *cell.Cell, *cell.Slice, and *big.Int +func (v *V3) RunGetMethod(ctx context.Context, addr *address.Address, method string, stack []any, masterSeqno *uint64) (*RunGetMethodV3Result, error) { + type runGetMethodRequest struct { + Address *address.Address `json:"address"` + Method string `json:"method,omitempty"` + Stack []stackElementV3 `json:"stack"` + MasterSeqno *uint64 `json:"seqno,omitempty"` + } + + type runGetMethodResult struct { + GasUsed uint64 `json:"gas_used"` + Stack []stackElementV3 `json:"stack"` + ExitCode int `json:"exit_code"` + BlockId BlockID `json:"block_id"` + LastTransactionId *TransactionID `json:"last_transaction_id"` + } + + var stk = []stackElementV3{} + for _, a := range stack { + switch val := a.(type) { + case *cell.Cell: + stk = append(stk, stackElementV3{ + Type: "cell", + Value: json.RawMessage(strconv.Quote(base64.StdEncoding.EncodeToString(val.ToBOC()))), + 
}) + case *cell.Slice: + stk = append(stk, stackElementV3{ + Type: "slice", + Value: json.RawMessage(strconv.Quote(base64.StdEncoding.EncodeToString(val.MustToCell().ToBOC()))), + }) + case *address.Address: + stk = append(stk, stackElementV3{ + Type: "slice", + Value: json.RawMessage(strconv.Quote(base64.StdEncoding.EncodeToString(cell.BeginCell().MustStoreAddr(val).EndCell().ToBOC()))), + }) + case *big.Int: + if val == nil { + return nil, fmt.Errorf("nil big.Int") + } + stk = append(stk, stackElementV3{ + Type: "num", + Value: json.RawMessage(strconv.Quote("0x" + val.Text(16))), + }) + default: + return nil, fmt.Errorf("unsupported stack element type") + } + } + + res, err := V3PostCall[runGetMethodResult](ctx, v, "runGetMethod", runGetMethodRequest{ + Address: addr, + Method: method, + Stack: stk, + MasterSeqno: masterSeqno, + }) + if err != nil { + return nil, err + } + + stack, err = parseStackV3(res.Stack) + if err != nil { + return nil, fmt.Errorf("failed to parse stack: %w", err) + } + + return &RunGetMethodV3Result{ + GasUsed: res.GasUsed, + Stack: stack, + ExitCode: res.ExitCode, + BlockId: res.BlockId, + LastTransactionId: res.LastTransactionId, + }, nil +} + +func parseStackV3(stack []stackElementV3) ([]any, error) { + var stk []any + for _, a := range stack { + switch a.Type { + case "cell", "slice": + var val string + if err := json.Unmarshal(a.Value, &val); err != nil { + return nil, fmt.Errorf("failed to unmarshal stack element: %w", err) + } + + b, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return nil, err + } + + c, err := cell.FromBOC(b) + if err != nil { + return nil, err + } + + if a.Type == "cell" { + stk = append(stk, c) + } else { + stk = append(stk, c.BeginParse()) + } + case "num": + var val string + if err := json.Unmarshal(a.Value, &val); err != nil { + return nil, fmt.Errorf("failed to unmarshal stack element: %w", err) + } + + if !strings.HasPrefix(val, "0x") { + return nil, fmt.Errorf("invalid number format") + } 
+ val = val[2:] + + res, _ := new(big.Int).SetString(val, 16) + if res == nil { + return nil, fmt.Errorf("invalid number format") + } + + stk = append(stk, res) + case "tuple": + var val []stackElementV3 + if err := json.Unmarshal(a.Value, &val); err != nil { + return nil, fmt.Errorf("failed to unmarshal stack element: %w", err) + } + + tup, err := parseStackV3(val) + if err != nil { + return nil, fmt.Errorf("failed to parse tuple: %w", err) + } + + stk = append(stk, tup) + default: + return nil, fmt.Errorf("unsupported stack element type") + } + } + return stk, nil +} + +func (v *V3) SendMessage(ctx context.Context, data []byte) error { + _, err := V3PostCall[any](ctx, v, "message", map[string][]byte{ + "boc": data, + }) + return err +} + +func V3PostCall[T any](ctx context.Context, v *V3, method string, req any) (*T, error) { + return doPOST[T](ctx, v.client, v.apiBase()+"/"+method, req, true) +} + +func V3GetCall[T any](ctx context.Context, v *V3, method string, query url.Values) (*T, error) { + return doGET[T](ctx, v.client, v.apiBase()+"/"+method, query, true) +} diff --git a/toncenter/client.go b/toncenter/client.go new file mode 100644 index 00000000..2f9b6cda --- /dev/null +++ b/toncenter/client.go @@ -0,0 +1,253 @@ +package toncenter + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +type Client struct { + http *http.Client + baseURL string // e.g. "https://toncenter.com" + apiKey string // used as X-API-Key; if empty, no auth header is set + + rl *slidingLimiter +} + +// Option configures Client. +type Option func(*Client) + +// WithAPIKey sets X-API-Key (and optional query api_key). +func WithAPIKey(key string) Option { + return func(c *Client) { + c.apiKey = key + } +} + +// WithHTTPClient allows custom http.Client (retries, tracing, proxy, etc). 
+func WithHTTPClient(h *http.Client) Option { + return func(c *Client) { c.http = h } +} + +// WithRateLimit limits requests per second, set 0 for no limit, default 0 +func WithRateLimit(maxPerSec float64) Option { + return func(c *Client) { + if maxPerSec > 0 { + period := 1 * time.Second + if maxPerSec < 1 { + period = time.Duration(math.Round(float64(time.Second) / maxPerSec)) + if period < time.Millisecond { + period = time.Millisecond + } + maxPerSec = 1 + } + + c.rl = newSlidingLimiter(int(maxPerSec), period) + } + } +} + +// WithTimeout sets http.Client timeout if a default client is used. +func WithTimeout(d time.Duration) Option { + return func(c *Client) { + if c.http == nil { + c.http = &http.Client{Timeout: d} + return + } + c.http.Timeout = d + } +} + +func New(baseURL string, opts ...Option) *Client { + c := &Client{ + baseURL: baseURL, + http: &http.Client{ + Timeout: 30 * time.Second, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + }, + }, + } + + for _, opt := range opts { + opt(c) + } + return c +} + +type slidingLimiter struct { + mu sync.Mutex + window time.Duration + max int + times []time.Time // sorted in ascending order; start moments of requests +} + +func newSlidingLimiter(max int, window time.Duration) *slidingLimiter { + return &slidingLimiter{ + window: window, + max: max, + times: make([]time.Time, 0, max), + } +} + +func (l *slidingLimiter) wait(ctx context.Context) error { + for { + now := time.Now() + cutoff := now.Add(-l.window) + + l.mu.Lock() + i := 0 + for i < len(l.times) && l.times[i].Before(cutoff) { + i++ + } + if i > 0 { + l.times = l.times[i:] + } + + if len(l.times) < l.max { + l.times = append(l.times, now) + l.mu.Unlock() + return nil + } + + waitUntil := 
l.times[0].Add(l.window) + l.mu.Unlock() + + d := time.Until(waitUntil) + if d <= 0 { + continue + } + + timer := time.NewTimer(d) + select { + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return ctx.Err() + case <-timer.C: + } + } +} + +type Response[T any] struct { + Ok bool `json:"ok"` + Result T `json:"result,omitempty"` + Error string `json:"error,omitempty"` + Code *int `json:"code,omitempty"` +} + +func doGET[T any](ctx context.Context, c *Client, path string, q url.Values, v3 bool) (*T, error) { + if q == nil { + q = url.Values{} + } + + u := path + if len(q) > 0 { + u += "?" + q.Encode() + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, err + } + + if c.apiKey != "" { + req.Header.Set("X-API-Key", c.apiKey) // supported in spec + } + return do[T](c, req, v3) +} + +func doPOST[T any](ctx context.Context, c *Client, path string, body any, v3 bool) (*T, error) { + var buf bytes.Buffer + if body != nil { + if err := json.NewEncoder(&buf).Encode(body); err != nil { + return nil, err + } + } + + u := path + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, &buf) + if err != nil { + return nil, err + } + + if c.apiKey != "" { + req.Header.Set("X-API-Key", c.apiKey) + } + req.Header.Set("Content-Type", "application/json") + + return do[T](c, req, v3) +} + +func do[T any](c *Client, req *http.Request, isV3 bool) (*T, error) { + if c.rl != nil { + if err := c.rl.wait(req.Context()); err != nil { + return nil, err + } + } + + resp, err := c.http.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(io.LimitReader(resp.Body, 150<<20)) // 150MB cap + if err != nil { + return nil, err + } + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + type errorTc struct { + Error string `json:"error"` + } + + var tr errorTc + if err = json.Unmarshal(body, &tr); err != nil { + return nil, fmt.Errorf("toncenter response decode error, status code 
%d: %w", resp.StatusCode, err) + } + return nil, fmt.Errorf("toncenter api error, status code %d: %s, %s", resp.StatusCode, tr.Error, string(body)) + } + + if isV3 { + var tr T + if err = json.Unmarshal(body, &tr); err != nil { + return nil, fmt.Errorf("toncenter api response parse error: %w", err) + } + + return &tr, nil + } + + var tr Response[T] + if err := json.Unmarshal(body, &tr); err != nil { + var trErr Response[string] + if err := json.Unmarshal(body, &trErr); err == nil && !trErr.Ok && trErr.Code != nil { + return nil, fmt.Errorf("toncenter api error, code %d: %s", *trErr.Code, trErr.Result) + } + + return nil, fmt.Errorf("decode error: %w; body=%s", err, string(body)) + } + + // HTTP 504 is possible (Lite Server Timeout) per spec; API still returns JSON envelope. + if !tr.Ok { + return nil, fmt.Errorf("toncenter api error: %s", tr.Error) + } + return &tr.Result, nil +} diff --git a/toncenter/client_test.go b/toncenter/client_test.go new file mode 100644 index 00000000..87ba5d9b --- /dev/null +++ b/toncenter/client_test.go @@ -0,0 +1,388 @@ +package toncenter + +import ( + "context" + "encoding/json" + "github.com/xssnick/tonutils-go/address" + "github.com/xssnick/tonutils-go/liteclient" + "github.com/xssnick/tonutils-go/tlb" + "github.com/xssnick/tonutils-go/ton" + "github.com/xssnick/tonutils-go/ton/wallet" + "github.com/xssnick/tonutils-go/tvm/cell" + "math/big" + "os" + "testing" + "time" +) + +var cli = New("https://toncenter.com", + WithTimeout(20*time.Second), + WithRateLimit(0.5), +) + +func TestTestnetEC(t *testing.T) { + cliTest := New("https://testnet.toncenter.com", + // WithAPIKey("YOUR_KEY"), // X-API-Key + WithTimeout(20*time.Second), + WithRateLimit(0.85), + ) + + t.Run("ext addr info", func(t *testing.T) { + resp, err := cliTest.V2().GetExtendedAddressInformation(context.Background(), address.MustParseAddr("0QDPMgkvdvGj6ZeGN4zaVrZNwFPmg3LUjvUxcNxEWIDSaBUR")) + if err != nil { + t.Fatal(err) + } + if resp.ExtraCurrencies[0].Currency != 100 
|| !resp.ExtraCurrencies[0].Balance.MustCoins(9).IsPositive() { + t.Fatal("incorrect", resp.ExtraCurrencies[0].Currency) + } + }) +} + +func TestV2(t *testing.T) { + ctx := context.Background() + t.Run("addr info", func(t *testing.T) { + resp, err := cli.V2().GetAddressInformation(ctx, address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N")) + if err != nil { + t.Fatal(err) + } + if !resp.Balance.MustCoins(9).IsPositive() { + t.Fatal("balance is incorrect") + } + }) + + t.Run("ext addr info", func(t *testing.T) { + resp, err := cli.V2().GetExtendedAddressInformation(ctx, address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N")) + if err != nil { + t.Fatal(err) + } + if !resp.Balance.MustCoins(9).IsPositive() { + t.Fatal("balance is incorrect") + } + }) + + t.Run("wallet info", func(t *testing.T) { + resp, err := cli.V2().GetWalletInformation(ctx, address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N")) + if err != nil { + t.Fatal(err) + } + if resp.Seqno <= 0 { + t.Fatal("seqno is negative") + } + }) + + var master BlockID + t.Run("master info", func(t *testing.T) { + resp, err := cli.V2().GetMasterchainInfo(ctx) + if err != nil { + t.Fatal(err) + } + if resp.Last.Seqno < 10000 { + t.Fatal("incorrect") + } + master = resp.Last + }) + + t.Run("master signatures", func(t *testing.T) { + resp, err := cli.V2().GetMasterchainBlockSignatures(ctx, master.Seqno) + if err != nil { + t.Fatal(err) + } + if len(resp.Signatures) == 0 { + t.Fatal("incorrect") + } + }) + + var shard BlockID + t.Run("get shards", func(t *testing.T) { + resp, err := cli.V2().GetShards(ctx, master.Seqno) + if err != nil { + t.Fatal(err) + } + if len(resp) == 0 { + t.Fatal("incorrect") + } + shard = resp[0] + }) + + t.Run("shard block proof", func(t *testing.T) { + resp, err := cli.V2().GetShardBlockProof(ctx, shard.Workchain, shard.Shard, shard.Seqno, nil) + if err != nil { + t.Fatal(err) + } + json.NewEncoder(os.Stdout).Encode(resp) + if resp == 
nil { + t.Fatal("incorrect") + } + }) + + t.Run("addr balance", func(t *testing.T) { + resp, err := cli.V2().GetAddressBalance(ctx, address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N")) + if err != nil { + t.Fatal(err) + } + if !resp.MustCoins(TonDecimals).IsPositive() { + t.Fatal("balance is negative") + } + }) + + var tx *TransactionV2 + t.Run("get transactions", func(t *testing.T) { + resp, err := cli.V2().GetTransactions(context.Background(), address.MustParseAddr("EQAYqo4u7VF0fa4DPAebk4g9lBytj2VFny7pzXR0trjtXQaO"), nil) + if err != nil { + t.Fatal(err) + } + if len(resp) == 0 { + t.Fatal("no transactions") + } + tx = &resp[0] + }) + + t.Run("get consensus block", func(t *testing.T) { + resp, err := cli.V2().GetConsensusBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + if resp.Seqno == 0 { + t.Fatal("incorrect") + } + }) + + t.Run("lookup block", func(t *testing.T) { + seqno := master.Seqno - 5 + resp, err := cli.V2().LookupBlock(context.Background(), master.Workchain, master.Shard, &LookupBlockV2Options{ + Seqno: &seqno, + }) + if err != nil { + t.Fatal(err) + } + if resp.Seqno == 0 { + t.Fatal("incorrect") + } + }) + + t.Run("get block transactions", func(t *testing.T) { + resp, err := cli.V2().GetBlockTransactions(context.Background(), shard.Workchain, shard.Shard, shard.Seqno, &GetBlockTransactionsV2Options{ + RootHash: shard.RootHash, + }) + if err != nil { + t.Fatal(err) + } + if len(resp.Transactions) == 0 || resp.Transactions[0].LT == 0 { + t.Fatal("incorrect") + } + }) + + t.Run("get block transactions ext", func(t *testing.T) { + resp, err := cli.V2().GetBlockTransactionsExt(context.Background(), shard.Workchain, shard.Shard, shard.Seqno, &GetBlockTransactionsV2Options{ + RootHash: shard.RootHash, + }) + if err != nil { + t.Fatal(err) + } + if len(resp.Transactions) == 0 || !resp.Transactions[0].Fee.MustCoins(9).IsPositive() { + t.Fatal("incorrect") + } + }) + + t.Run("get block header", func(t *testing.T) { + if 
len(master.RootHash) == 0 { + t.Fatal("root hash empty") + } + + resp, err := cli.V2().GetBlockHeader(context.Background(), master.Workchain, master.Shard, master.Seqno, &GetBlockHeaderOptions{ + RootHash: master.RootHash, + FileHash: master.FileHash, + }) + if err != nil { + // since this api call may not work on the toncenter side, we would skip it in the test and just log + t.Fatal(err) + return + } + if len(resp.PrevBlocks) == 0 { + t.Fatal("incorrect") + } + }) + + t.Run("get config param", func(t *testing.T) { + resp, err := cli.V2().GetConfigParam(context.Background(), 11, nil) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("incorrect") + } + }) + + t.Run("get config all", func(t *testing.T) { + resp, err := cli.V2().GetConfigAll(context.Background(), nil) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("incorrect") + } + }) + + t.Run("get out msg queue size", func(t *testing.T) { + resp, err := cli.V2().GetOutMsgQueueSizes(context.Background()) + if err != nil { + t.Fatal(err) + } + if resp.SizeLimit == 0 || len(resp.Shards) == 0 { + t.Fatal("incorrect") + } + }) + + t.Run("get token data", func(t *testing.T) { + resp, err := cli.V2().GetTokenData(context.Background(), address.MustParseAddr("EQCxE6mUtQJKFnGfaROTKOt1lZbDiiX1kCixRv7Nw2Id_sDs")) + if err != nil { + t.Fatal(err) + } + if !resp.Mintable { + t.Fatal("incorrect") + } + + }) + + t.Run("try locate tx", func(t *testing.T) { + resp, err := cli.V2().TryLocateTx(context.Background(), tx.InMsg.Source.Addr, tx.InMsg.Destination.Addr, tx.InMsg.CreatedLt) + if err != nil { + t.Fatal(err) + } + if resp.InMsg.Source.Addr.String() != tx.InMsg.Source.Addr.String() { + t.Fatal("incorrect") + } + }) + + t.Run("try locate result tx", func(t *testing.T) { + resp, err := cli.V2().TryLocateResultTx(context.Background(), tx.InMsg.Source.Addr, tx.InMsg.Destination.Addr, tx.InMsg.CreatedLt) + if err != nil { + t.Fatal(err) + } + if resp.InMsg.Source.Addr.String() != 
tx.InMsg.Source.Addr.String() { + t.Fatal("incorrect") + } + tx = resp + }) + + t.Run("try locate source tx", func(t *testing.T) { + resp, err := cli.V2().TryLocateSourceTx(context.Background(), tx.InMsg.Source.Addr, tx.InMsg.Destination.Addr, tx.InMsg.CreatedLt) + if err != nil { + // as the Toncenter team said, it does not always work on the api side, that's why it is deprecated + t.Log(err) + + return + } + if resp.InMsg.Destination.Addr.String() != tx.InMsg.Source.Addr.String() { + t.Fatal("incorrect") + } + }) + + t.Run("estimate fee", func(t *testing.T) { + w, err := wallet.FromSeedWithOptions(ton.NewAPIClient(liteclient.NewOfflineClient()), wallet.NewSeed(), wallet.V4R2) + if err != nil { + t.Fatal(err) + } + w.GetSpec().(*wallet.SpecV4R2).SetSeqnoFetcher(func(ctx context.Context, sub uint32) (uint32, error) { + // Get seqno from your database here, this func will be called during BuildTransfer to get seqno for transaction + return 1, nil + }) + + outMsg := wallet.SimpleMessage(w.WalletAddress(), tlb.MustFromTON("0.1"), cell.BeginCell().EndCell()) + msg, err := w.PrepareExternalMessageForMany(context.Background(), true, []*wallet.Message{outMsg}) + if err != nil { + t.Fatal(err) + } + + resp, err := cli.V2().EstimateFee(context.Background(), EstimateFeeRequest{ + Address: w.WalletAddress(), + Body: msg.Body, + InitCode: msg.StateInit.Code, + InitData: msg.StateInit.Data, + IgnoreChkSig: true, + }) + if err != nil { + t.Fatal(err) + } + if resp.SourceFees.InFwdFee == 0 { + t.Fatal("incorrect") + } + }) + + t.Run("run get method", func(t *testing.T) { + resp, err := cli.V2().RunGetMethod(context.Background(), + address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N"), "seqno", []any{big.NewInt(123)}, nil) + if err != nil { + t.Fatal(err) + } + if resp.Stack[0].(*big.Int).Uint64() == 0 { + t.Fatal("incorrect") + } + }) + +} + +func TestV3(t *testing.T) { + ctx := context.Background() + t.Run("addr info", func(t *testing.T) { + resp, err := 
cli.V3().GetAddressInformation(ctx, address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N")) + if err != nil { + t.Fatal(err) + } + if !resp.Balance.MustCoins(9).IsPositive() { + t.Fatal("balance is incorrect") + } + }) + + t.Run("wallet info", func(t *testing.T) { + resp, err := cli.V3().GetWalletInformation(ctx, address.MustParseAddr("EQCD39VS5jcptHL8vMjEXrzGaRcCVYto7HUn4bpAOg8xqB2N")) + if err != nil { + t.Fatal(err) + } + if resp.Seqno <= 0 { + t.Fatal("seqno is negative") + } + }) + + t.Run("run get method", func(t *testing.T) { + resp, err := cli.V3().RunGetMethod(context.Background(), + address.MustParseAddr("EQB9cFa5EIS-v9LXhJ_MbuYxg9wBm7kOH81qAMVDlZx4AJ_K"), "get_providers", []any{big.NewInt(123), cell.BeginCell().EndCell(), cell.BeginCell().ToSlice(), address.MustParseAddr("EQB9cFa5EIS-v9LXhJ_MbuYxg9wBm7kOH81qAMVDlZx4AJ_K")}, nil) + if err != nil { + t.Fatal(err) + } + if resp.Stack[5].(*big.Int).Uint64() == 0 { + t.Fatal("incorrect") + } + }) + + t.Run("estimate fee", func(t *testing.T) { + w, err := wallet.FromSeedWithOptions(ton.NewAPIClient(liteclient.NewOfflineClient()), wallet.NewSeed(), wallet.V4R2) + if err != nil { + t.Fatal(err) + } + w.GetSpec().(*wallet.SpecV4R2).SetSeqnoFetcher(func(ctx context.Context, sub uint32) (uint32, error) { + // Get seqno from your database here, this func will be called during BuildTransfer to get seqno for transaction + return 1, nil + }) + + outMsg := wallet.SimpleMessage(w.WalletAddress(), tlb.MustFromTON("0.1"), cell.BeginCell().EndCell()) + msg, err := w.PrepareExternalMessageForMany(context.Background(), true, []*wallet.Message{outMsg}) + if err != nil { + t.Fatal(err) + } + + resp, err := cli.V3().EstimateFee(context.Background(), EstimateFeeRequest{ + Address: w.WalletAddress(), + Body: msg.Body, + InitCode: msg.StateInit.Code, + InitData: msg.StateInit.Data, + IgnoreChkSig: true, + }) + if err != nil { + t.Fatal(err) + } + if resp.SourceFees.InFwdFee == 0 { + t.Fatal("incorrect") + } + }) 
+} diff --git a/toncenter/types.go b/toncenter/types.go new file mode 100644 index 00000000..e7c35106 --- /dev/null +++ b/toncenter/types.go @@ -0,0 +1,51 @@ +package toncenter + +import ( + "encoding/json" + "github.com/xssnick/tonutils-go/tlb" + "math/big" +) + +const TonDecimals = 9 + +type TransactionID struct { + LT uint64 `json:"lt,string"` + Hash []byte `json:"hash"` +} + +type BlockID struct { + Workchain int32 `json:"workchain"` + Shard int64 `json:"shard,string"` + Seqno uint64 `json:"seqno"` + RootHash []byte `json:"root_hash"` + FileHash []byte `json:"file_hash"` +} + +type NanoCoins struct { + val *big.Int +} + +func (n *NanoCoins) Coins(decimals int) (tlb.Coins, error) { + return tlb.FromNano(n.val, decimals) +} + +func (n *NanoCoins) MustCoins(decimals int) tlb.Coins { + c, err := n.Coins(decimals) + if err != nil { + panic(err) + } + return c +} + +func (n *NanoCoins) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + n.val = new(big.Int) + return n.val.UnmarshalText([]byte(s)) +} + +func (n *NanoCoins) MarshalJSON() ([]byte, error) { + return json.Marshal(n.val.String()) +} diff --git a/tvm/cell/cell.go b/tvm/cell/cell.go index f2f469a6..faf27cf1 100644 --- a/tvm/cell/cell.go +++ b/tvm/cell/cell.go @@ -282,10 +282,14 @@ func (c *Cell) GetType() Type { func (c *Cell) UnmarshalJSON(bytes []byte) error { if len(bytes) < 2 || bytes[0] != '"' || bytes[len(bytes)-1] != '"' { - return fmt.Errorf("invalid data") + return fmt.Errorf("invalid cell data") } bytes = bytes[1 : len(bytes)-1] + if len(bytes) == 0 { + return nil + } + data := make([]byte, base64.StdEncoding.DecodedLen(len(bytes))) n, err := base64.StdEncoding.Decode(data, bytes)