diff --git a/CHANGELOG.md b/CHANGELOG.md index 44ffe60fd..05d36674b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [Unreleased] +- chore(deps): Remove dependency on github.com/pkg/errors (#2184) - chore(deps): Migrate from OpenCensus to OpenTelemetry (#2169) ## [4.5.1] - 2025-01-21 diff --git a/README.md b/README.md index fe581a752..6e1b9406b 100644 --- a/README.md +++ b/README.md @@ -5,12 +5,11 @@ [![Sourcegraph](https://sourcegraph.com/github.com/hypermodeinc/badger/-/badge.svg)](https://sourcegraph.com/github.com/hypermodeinc/badger?badge) [![ci-badger-tests](https://github.com/hypermodeinc/badger/actions/workflows/ci-badger-tests.yml/badge.svg)](https://github.com/hypermodeinc/badger/actions/workflows/ci-badger-tests.yml) [![ci-badger-bank-tests](https://github.com/hypermodeinc/badger/actions/workflows/ci-badger-bank-tests.yml/badge.svg)](https://github.com/hypermodeinc/badger/actions/workflows/ci-badger-bank-tests.yml) -[![ci-golang-lint](https://github.com/hypermodeinc/badger/actions/workflows/ci-golang-lint.yml/badge.svg)](https://github.com/hypermodeinc/badger/actions/workflows/ci-golang-lint.yml) ![Badger mascot](images/diggy-shadow.png) BadgerDB is an embeddable, persistent and fast key-value (KV) database written in pure Go. It is the -underlying database for [Dgraph](https://dgraph.io), a fast, distributed graph database. It's meant +underlying database for [Dgraph](https://github.com/hypermodeinc/dgraph), a fast, distributed graph database. It's meant to be a performant alternative to non-Go-based key-value stores like RocksDB. ## Project Status @@ -24,18 +23,8 @@ Jaeger Tracing, UsenetExpress, and many more. The list of projects using Badger can be found [here](#projects-using-badger). -Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible with v1.0 is -v1.6.0. - -Badger v2.0 was released in Nov 2019 with a new storage format which won't be compatible with all of -the v1.x. Badger v2.0 supports compression, encryption and uses a cache to speed up lookup. - -Badger v3.0 was released in January 2021. This release improves compaction performance. - Please consult the [Changelog] for more detailed information on releases. -For more details on our version naming schema please read [Choosing a version](#choosing-a-version). - [Changelog]: https://github.com/hypermodeinc/badger/blob/main/CHANGELOG.md ## Table of Contents @@ -82,30 +71,6 @@ go install . This will install the badger command line utility into your $GOBIN path. -#### Choosing a version - -BadgerDB is a pretty special package from the point of view that the most important change we can -make to it is not on its API but rather on how data is stored on disk. - -This is why we follow a version naming schema that differs from Semantic Versioning. - -- New major versions are released when the data format on disk changes in an incompatible way. -- New minor versions are released whenever the API changes but data compatibility is maintained. - Note that the changes on the API could be backward-incompatible - unlike Semantic Versioning. -- New patch versions are released when there's no changes to the data format nor the API. - -Following these rules: - -- v1.5.0 and v1.6.0 can be used on top of the same files without any concerns, as their major - version is the same, therefore the data format on disk is compatible. 
-- v1.6.0 and v2.0.0 are data incompatible as their major version implies, so files created with - v1.6.0 will need to be converted into the new format before they can be used by v2.0.0. -- v2.x.x and v3.x.x are data incompatible as their major version implies, so files created with - v2.x.x will need to be converted into the new format before they can be used by v3.0.0. - -For a longer explanation on the reasons behind using a new versioning naming schema, you can read -[VERSIONING](VERSIONING.md). - ## Badger Documentation Badger Documentation is available at https://docs.hypermode.com/badger @@ -114,10 +79,10 @@ Badger Documentation is available at https://docs.hypermode.com/badger ### Blog Posts -1. [Introducing Badger: A fast key-value store written natively in Go](https://open.dgraph.io/post/badger/) -2. [Make Badger crash resilient with ALICE](https://open.dgraph.io/post/alice/) -3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://open.dgraph.io/post/badger-lmdb-boltdb/) -4. [Concurrent ACID Transactions in Badger](https://open.dgraph.io/post/badger-txn/) +1. [Introducing Badger: A fast key-value store written natively in Go](https://hypermode.com/blog/badger/) +2. [Make Badger crash resilient with ALICE](https://hypermode.com/blog/alice/) +3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://hypermode.com/blog/badger-lmdb-boltdb/) +4. [Concurrent ACID Transactions in Badger](https://hypermode.com/blog/badger-txn/) ## Design @@ -155,7 +120,7 @@ values from keys, significantly reducing the write amplification compared to a t rotating disks. As such RocksDB's design isn't aimed at SSDs. 3 SSI: Serializable Snapshot Isolation. For more details, see the blog post -[Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) +[Concurrent ACID Transactions in Badger](https://hypermode.com/blog/badger-txn/) 4 Badger provides direct access to value versions via its Iterator API. Users can also specify how many versions to keep per key via Options. @@ -277,6 +242,11 @@ Below is a list of known projects that use Badger: and badger db - [MightyMap](https://github.com/thisisdevelopment/mightymap) - Mightymap: Conveys both robustness and high capability, fitting for a powerful concurrent map. +- [FlowG](https://github.com/link-society/flowg) - A low-code log processing facility +- [Bluefin](https://github.com/blinklabs-io/bluefin) - Bluefin is a TUNA Proof of Work miner for + the Fortuna smart contract on the Cardano blockchain +- [cDNSd](https://github.com/blinklabs-io/cdnsd) - A Cardano blockchain backed DNS server daemon +- [Dingo](https://github.com/blinklabs-io/dingo) - A Cardano blockchain data node If you are using Badger in a project please send a pull request to add it to the list. @@ -287,6 +257,6 @@ If you're interested in contributing to Badger see [CONTRIBUTING](./CONTRIBUTING ## Contact - Please use [Github issues](https://github.com/hypermodeinc/badger/issues) for filing bugs. -- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, discussions, and feature +- Please use [discuss.hypermode.com](https://discuss.hypermode.com) for questions, discussions, and feature requests. -- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). +- Follow us on Twitter [@hypermodeinc](https://twitter.com/hypermodeinc). 
diff --git a/backup.go b/backup.go index cce62d090..4337e4d16 100644 --- a/backup.go +++ b/backup.go @@ -10,9 +10,9 @@ import ( "bytes" "context" "encoding/binary" + "fmt" "io" - "github.com/pkg/errors" "google.golang.org/protobuf/proto" "github.com/dgraph-io/badger/v4/pb" @@ -61,7 +61,7 @@ func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) { return list, nil } if item.Version() < since { - return nil, errors.Errorf("Backup: Item Version: %d less than sinceTs: %d", + return nil, fmt.Errorf("Backup: Item Version: %d less than sinceTs: %d", item.Version(), since) } diff --git a/badger/cmd/flatten.go b/badger/cmd/flatten.go index 31d3cfc27..1c0ae7938 100644 --- a/badger/cmd/flatten.go +++ b/badger/cmd/flatten.go @@ -6,10 +6,10 @@ package cmd import ( + "errors" "fmt" "math" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/dgraph-io/badger/v4" @@ -57,7 +57,7 @@ func flatten(cmd *cobra.Command, args []string) error { return err } if fo.compressionType > 2 { - return errors.Errorf( + return errors.New( "compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)") } opt := badger.DefaultOptions(sstDir). diff --git a/badger/cmd/info.go b/badger/cmd/info.go index 3f790c6f1..82795167e 100644 --- a/badger/cmd/info.go +++ b/badger/cmd/info.go @@ -17,7 +17,6 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/dgraph-io/badger/v4" @@ -198,7 +197,7 @@ func lookup(db *badger.DB) error { itr.Rewind() if !itr.Valid() { - return errors.Errorf("Unable to rewind to key:\n%s", hex.Dump(key)) + return fmt.Errorf("Unable to rewind to key:\n%s", hex.Dump(key)) } fmt.Println() item := itr.Item() diff --git a/badger/cmd/read_bench.go b/badger/cmd/read_bench.go index acde52a69..fd158156c 100644 --- a/badger/cmd/read_bench.go +++ b/badger/cmd/read_bench.go @@ -7,6 +7,7 @@ package cmd import ( "context" + "errors" "fmt" "math" "math/rand" @@ -14,7 +15,6 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "github.com/pkg/errors" "github.com/spf13/cobra" "google.golang.org/protobuf/proto" @@ -195,7 +195,7 @@ func getSampleKeys(db *badger.DB, sampleSize int) ([][]byte, error) { return l, nil } - errStop := errors.Errorf("Stop iterating") + errStop := errors.New("Stop iterating") ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream.Send = func(buf *z.Buffer) error { diff --git a/badger/cmd/stream.go b/badger/cmd/stream.go index ad3c94598..c86dd9ce9 100644 --- a/badger/cmd/stream.go +++ b/badger/cmd/stream.go @@ -6,12 +6,12 @@ package cmd import ( + "errors" "fmt" "io" "math" "os" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/dgraph-io/badger/v4" @@ -75,7 +75,7 @@ func stream(cmd *cobra.Command, args []string) error { // Options for output DB. 
if so.compressionType > 2 { - return errors.Errorf( + return errors.New( "compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)") } inDB, err := badger.OpenManaged(inOpt) @@ -96,7 +96,7 @@ func stream(cmd *cobra.Command, args []string) error { _, err = f.Readdirnames(1) if err != io.EOF { - return errors.Errorf( + return fmt.Errorf( "cannot run stream tool on non-empty output directory %s", so.outDir) } } diff --git a/batch.go b/batch.go index afef08cae..63e619097 100644 --- a/batch.go +++ b/batch.go @@ -6,10 +6,11 @@ package badger import ( + "errors" + "fmt" "sync" "sync/atomic" - "github.com/pkg/errors" "google.golang.org/protobuf/proto" "github.com/dgraph-io/badger/v4/pb" @@ -220,7 +221,7 @@ func (wb *WriteBatch) Flush() error { if err := wb.throttle.Finish(); err != nil { if wb.Error() != nil { - return errors.Errorf("wb.err: %s err: %s", wb.Error(), err) + return fmt.Errorf("wb.err: %w err: %w", wb.Error(), err) } return err } diff --git a/db.go b/db.go index 658963759..f30949ebf 100644 --- a/db.go +++ b/db.go @@ -9,7 +9,7 @@ import ( "bytes" "context" "encoding/binary" - stderrors "errors" + "errors" "expvar" "fmt" "math" @@ -22,7 +22,6 @@ import ( "time" humanize "github.com/dustin/go-humanize" - "github.com/pkg/errors" "github.com/dgraph-io/badger/v4/fb" "github.com/dgraph-io/badger/v4/options" @@ -145,14 +144,14 @@ func checkAndSetOptions(opt *Options) error { // We are limiting opt.ValueThreshold to maxValueThreshold for now. if opt.ValueThreshold > maxValueThreshold { - return errors.Errorf("Invalid ValueThreshold, must be less or equal to %d", + return fmt.Errorf("Invalid ValueThreshold, must be less or equal to %d", maxValueThreshold) } // If ValueThreshold is greater than opt.maxBatchSize, we won't be able to push any data using // the transaction APIs. Transaction batches entries into batches of size opt.maxBatchSize. if opt.ValueThreshold > opt.maxBatchSize { - return errors.Errorf("Valuethreshold %d greater than max batch size of %d. Either "+ + return fmt.Errorf("Valuethreshold %d greater than max batch size of %d. Either "+ "reduce opt.ValueThreshold or increase opt.BaseTableSize.", opt.ValueThreshold, opt.maxBatchSize) } @@ -373,7 +372,7 @@ func Open(opt Options) (*DB, error) { go db.threshold.listenForValueThresholdUpdate() if err := db.initBannedNamespaces(); err != nil { - return db, errors.Wrapf(err, "While setting banned keys") + return db, fmt.Errorf("While setting banned keys: %w", err) } db.closers.writes = z.NewCloser(1) @@ -787,7 +786,7 @@ func (db *DB) writeToLSM(b *request) error { // running in InMemory mode. In InMemory mode, we don't write anything to the // value log and that's why the length of b.Ptrs will always be zero. if !db.opt.InMemory && len(b.Ptrs) != len(b.Entries) { - return errors.Errorf("Ptrs and Entries don't match: %+v", b) + return fmt.Errorf("Ptrs and Entries don't match: %+v", b) } for i, entry := range b.Entries { @@ -1005,7 +1004,7 @@ func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error { return nil } -var errNoRoom = stderrors.New("No room for write") +var errNoRoom = errors.New("No room for write") // ensureRoomForWrite is always called serially. 
func (db *DB) ensureRoomForWrite() error { @@ -1963,7 +1962,7 @@ func createDirs(opt Options) error { } if !dirExists { if opt.ReadOnly { - return errors.Errorf("Cannot find directory %q for read-only open", path) + return fmt.Errorf("Cannot find directory %q for read-only open", path) } // Try to create the directory err = os.MkdirAll(path, 0700) @@ -2035,7 +2034,7 @@ func (db *DB) CacheMaxCost(cache CacheType, maxCost int64) (int64, error) { case IndexCache: return db.indexCache.MaxCost(), nil default: - return 0, errors.Errorf("invalid cache type") + return 0, errors.New("invalid cache type") } } @@ -2047,7 +2046,7 @@ func (db *DB) CacheMaxCost(cache CacheType, maxCost int64) (int64, error) { db.indexCache.UpdateMaxCost(maxCost) return maxCost, nil default: - return 0, errors.Errorf("invalid cache type") + return 0, errors.New("invalid cache type") } } diff --git a/go.mod b/go.mod index 2d263f3ef..255c9cbda 100644 --- a/go.mod +++ b/go.mod @@ -1,23 +1,22 @@ module github.com/dgraph-io/badger/v4 -go 1.22.12 +go 1.23.0 -toolchain go1.24.0 +toolchain go1.24.1 require ( github.com/cespare/xxhash/v2 v2.3.0 - github.com/dgraph-io/ristretto/v2 v2.1.0 + github.com/dgraph-io/ristretto/v2 v2.2.0 github.com/dustin/go-humanize v1.0.1 github.com/google/flatbuffers v25.2.10+incompatible github.com/klauspost/compress v1.18.0 - github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.9.1 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/contrib/zpages v0.59.0 - go.opentelemetry.io/otel v1.34.0 - golang.org/x/net v0.35.0 - golang.org/x/sys v0.30.0 - google.golang.org/protobuf v1.36.5 + go.opentelemetry.io/contrib/zpages v0.60.0 + go.opentelemetry.io/otel v1.35.0 + golang.org/x/net v0.38.0 + golang.org/x/sys v0.31.0 + google.golang.org/protobuf v1.36.6 ) require ( @@ -29,9 +28,9 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.6 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index bf4beebb9..f7e4ae0e1 100644 --- a/go.sum +++ b/go.sum @@ -3,10 +3,10 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= -github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= +github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod 
h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -16,8 +16,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -28,8 +28,6 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -43,22 +41,24 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/zpages v0.59.0 h1:t0H5zUy8fifIhRuVwm2FrA/D70Kk10SSpAEvvbaNscw= -go.opentelemetry.io/contrib/zpages v0.59.0/go.mod h1:9wo+yUPvHnBQEzoHJ8R3nA/Q5rkef7HjtLlSFI0Tgrc= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= -golang.org/x/sys v0.30.0 
h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +go.opentelemetry.io/contrib/zpages v0.60.0 h1:wOM9ie1Hz4H88L9KE6GrGbKJhfm+8F1NfW/Y3q9Xt+8= +go.opentelemetry.io/contrib/zpages v0.60.0/go.mod h1:xqfToSRGh2MYUsfyErNz8jnNDPlnpZqWM/y6Z2Cx7xw= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/levels.go b/levels.go index 48a954316..3cc7bf694 100644 --- a/levels.go +++ b/levels.go @@ -9,7 +9,7 @@ import ( "bytes" "context" "encoding/hex" - stderrors "errors" + "errors" "fmt" "math" "math/rand" @@ -20,7 +20,6 @@ import ( "sync/atomic" "time" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -1502,7 +1501,7 @@ func tablesToString(tables []*table.Table) []string { return res } -var errFillTables = stderrors.New("Unable to fill tables") +var errFillTables = errors.New("Unable to fill tables") // doCompact picks some table on level l and compacts it away to the next level. 
func (s *levelsController) doCompact(id int, p compactionPriority) error { diff --git a/manifest.go b/manifest.go index 66753788f..4d03c7499 100644 --- a/manifest.go +++ b/manifest.go @@ -9,7 +9,7 @@ import ( "bufio" "bytes" "encoding/binary" - stderrors "errors" + "errors" "fmt" "hash/crc32" "io" @@ -18,7 +18,6 @@ import ( "path/filepath" "sync" - "github.com/pkg/errors" "google.golang.org/protobuf/proto" "github.com/dgraph-io/badger/v4/options" @@ -342,8 +341,8 @@ func (r *countingReader) ReadByte() (b byte, err error) { } var ( - errBadMagic = stderrors.New("manifest has bad magic") - errBadChecksum = stderrors.New("manifest has checksum mismatch") + errBadMagic = errors.New("manifest has bad magic") + errBadChecksum = errors.New("manifest has checksum mismatch") ) // ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one @@ -399,7 +398,7 @@ func ReplayManifestFile(fp *os.File, extMagic uint16) (Manifest, int64, error) { length := y.BytesToU32(lenCrcBuf[0:4]) // Sanity check to ensure we don't over-allocate memory. if length > uint32(stat.Size()) { - return Manifest{}, 0, errors.Errorf( + return Manifest{}, 0, fmt.Errorf( "Buffer length: %d greater than file size: %d. Manifest file might be corrupted", length, stat.Size()) } diff --git a/memtable.go b/memtable.go index 7d026d128..58461cfb9 100644 --- a/memtable.go +++ b/memtable.go @@ -22,8 +22,6 @@ import ( "sync" "sync/atomic" - "github.com/pkg/errors" - "github.com/dgraph-io/badger/v4/pb" "github.com/dgraph-io/badger/v4/skl" "github.com/dgraph-io/badger/v4/y" @@ -147,7 +145,7 @@ func (db *DB) newMemTable() (*memTable, error) { db.opt.Errorf("Got error: %v for id: %d\n", err, db.nextMemFid) return nil, y.Wrapf(err, "newMemTable") } - return nil, errors.Errorf("File %s already exists", mt.wal.Fd.Name()) + return nil, fmt.Errorf("File %s already exists", mt.wal.Fd.Name()) } func (db *DB) mtFilePath(fid int) string { diff --git a/options.go b/options.go index 218b94772..54eeed51f 100644 --- a/options.go +++ b/options.go @@ -13,8 +13,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/dgraph-io/badger/v4/options" "github.com/dgraph-io/badger/v4/table" "github.com/dgraph-io/badger/v4/y" @@ -234,10 +232,10 @@ func parseCompression(cStr string) (options.CompressionType, int, error) { y.Check(err) if level <= 0 { return 0, 0, - errors.Errorf("ERROR: compression level(%v) must be greater than zero", level) + fmt.Errorf("ERROR: compression level(%v) must be greater than zero", level) } } else if len(cStrSplit) > 2 { - return 0, 0, errors.Errorf("ERROR: Invalid badger.compression argument") + return 0, 0, fmt.Errorf("ERROR: Invalid badger.compression argument") } switch cType { case "zstd": @@ -247,7 +245,7 @@ func parseCompression(cStr string) (options.CompressionType, int, error) { case "none": return options.None, 0, nil } - return 0, 0, errors.Errorf("ERROR: compression type (%s) invalid", cType) + return 0, 0, fmt.Errorf("ERROR: compression type (%s) invalid", cType) } // generateSuperFlag generates an identical SuperFlag string from the provided Options. 
diff --git a/publisher_test.go b/publisher_test.go
index 987ca0c30..58f35352e 100644
--- a/publisher_test.go
+++ b/publisher_test.go
@@ -7,13 +7,13 @@ package badger
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"runtime"
 	"sync"
 	"sync/atomic"
 	"testing"
 
-	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 
 	"github.com/dgraph-io/badger/v4/pb"
diff --git a/stream_writer.go b/stream_writer.go
index 674a33afa..981b6e932 100644
--- a/stream_writer.go
+++ b/stream_writer.go
@@ -11,7 +11,6 @@ import (
 	"sync"
 
 	humanize "github.com/dustin/go-humanize"
-	"github.com/pkg/errors"
 	"google.golang.org/protobuf/proto"
 
 	"github.com/dgraph-io/badger/v4/pb"
@@ -119,7 +118,7 @@ func (sw *StreamWriter) PrepareIncremental() error {
 		// on the tree, all the data will go to Lmax. All the levels above will be empty
 		// after flatten call. Now, we should be able to use incremental stream writer again.
 		if err := sw.db.Flatten(3); err != nil {
-			return errors.Wrapf(err, "error during flatten in StreamWriter")
+			return fmt.Errorf("error during flatten in StreamWriter: %w", err)
 		}
 		sw.prevLevel = len(sw.db.Levels()) - 1
 	}
@@ -417,7 +416,7 @@ func (w *sortedWriter) handleRequests() {
 // Add adds key and vs to sortedWriter.
 func (w *sortedWriter) Add(key []byte, vs y.ValueStruct) error {
 	if len(w.lastKey) > 0 && y.CompareKeys(key, w.lastKey) <= 0 {
-		return errors.Errorf("keys not in sorted order (last key: %s, key: %s)",
+		return fmt.Errorf("keys not in sorted order (last key: %s, key: %s)",
 			hex.Dump(w.lastKey), hex.Dump(key))
 	}
 
diff --git a/table/builder.go b/table/builder.go
index d590f0961..70ebc99bd 100644
--- a/table/builder.go
+++ b/table/builder.go
@@ -7,6 +7,7 @@ package table
 
 import (
 	"crypto/aes"
+	"errors"
 	"math"
 	"runtime"
 	"sync"
@@ -15,7 +16,6 @@ import (
 
 	fbs "github.com/google/flatbuffers/go"
 	"github.com/klauspost/compress/s2"
-	"github.com/pkg/errors"
 	"google.golang.org/protobuf/proto"
 
 	"github.com/dgraph-io/badger/v4/fb"
diff --git a/table/table.go b/table/table.go
index d729d8982..a32515e2d 100644
--- a/table/table.go
+++ b/table/table.go
@@ -9,6 +9,7 @@ import (
 	"bytes"
 	"crypto/aes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"math"
 	"os"
@@ -22,7 +23,6 @@ import (
 
 	"github.com/klauspost/compress/snappy"
 	"github.com/klauspost/compress/zstd"
-	"github.com/pkg/errors"
 	"google.golang.org/protobuf/proto"
 
 	"github.com/dgraph-io/badger/v4/fb"
@@ -250,7 +250,7 @@ func CreateTable(fname string, builder *Builder) (*Table, error) {
 	} else if err != nil {
 		return nil, y.Wrapf(err, "while creating table: %s", fname)
 	} else {
-		return nil, errors.Errorf("file already exists: %s", fname)
+		return nil, fmt.Errorf("file already exists: %s", fname)
 	}
 
 	written := bd.Copy(mf.Data)
@@ -281,7 +281,7 @@ func OpenTable(mf *z.MmapFile, opts Options) (*Table, error) {
 	id, ok := ParseFileID(filename)
 	if !ok {
 		mf.Close(-1)
-		return nil, errors.Errorf("Invalid filename: %s", filename)
+		return nil, fmt.Errorf("Invalid filename: %s", filename)
 	}
 	t := &Table{
 		MmapFile: mf,
diff --git a/trie/trie.go b/trie/trie.go
index 9c0ee7cdb..5675a4e14 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -10,8 +10,6 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/pkg/errors"
-
 	"github.com/dgraph-io/badger/v4/pb"
 	"github.com/dgraph-io/badger/v4/y"
 )
@@ -123,7 +121,7 @@ func (t *Trie) fix(m pb.Match, id uint64, op int) error {
 
 	ignore, err := parseIgnoreBytes(m.IgnoreBytes)
 	if err != nil {
-		return errors.Wrapf(err, "while parsing ignore bytes: %s", m.IgnoreBytes)
+		return fmt.Errorf("while parsing ignore bytes: %s: %w", m.IgnoreBytes, err)
 	}
 	for len(ignore) < len(m.Prefix) {
ignore = append(ignore, false) diff --git a/txn.go b/txn.go index 50d17a5bc..d5a6af524 100644 --- a/txn.go +++ b/txn.go @@ -9,14 +9,14 @@ import ( "bytes" "context" "encoding/hex" + "errors" + "fmt" "math" "sort" "strconv" "sync" "sync/atomic" - "github.com/pkg/errors" - "github.com/dgraph-io/badger/v4/y" "github.com/dgraph-io/ristretto/v2/z" ) @@ -344,7 +344,7 @@ func (txn *Txn) checkSize(e *Entry) error { } func exceedsSize(prefix string, max int64, key []byte) error { - return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s", + return fmt.Errorf("%s with size %d exceeded %d limit. %s:\n%s", prefix, len(key), max, prefix, hex.Dump(key[:1<<10])) } diff --git a/util.go b/util.go index d80b8f318..d99abee04 100644 --- a/util.go +++ b/util.go @@ -7,12 +7,11 @@ package badger import ( "encoding/hex" + "fmt" "math/rand" "os" "time" - "github.com/pkg/errors" - "github.com/dgraph-io/badger/v4/table" "github.com/dgraph-io/badger/v4/y" ) @@ -37,11 +36,11 @@ func (s *levelHandler) validate() error { numTables := len(s.tables) for j := 1; j < numTables; j++ { if j >= len(s.tables) { - return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables) + return fmt.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables) } if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 { - return errors.Errorf( + return fmt.Errorf( "Inter: Biggest(j-1)[%d] \n%s\n vs Smallest(j)[%d]: \n%s\n: "+ "level=%d j=%d numTables=%d", s.tables[j-1].ID(), hex.Dump(s.tables[j-1].Biggest()), s.tables[j].ID(), @@ -49,7 +48,7 @@ func (s *levelHandler) validate() error { } if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 { - return errors.Errorf( + return fmt.Errorf( "Intra: \n%s\n vs \n%s\n: level=%d j=%d numTables=%d", hex.Dump(s.tables[j].Smallest()), hex.Dump(s.tables[j].Biggest()), s.level, j, numTables) } diff --git a/value.go b/value.go index 80cc4c436..e30a80cfb 100644 --- a/value.go +++ b/value.go @@ -8,7 +8,7 @@ package badger import ( "bytes" "context" - stderrors "errors" + "errors" "fmt" "hash" "hash/crc32" @@ -21,7 +21,6 @@ import ( "sync" "sync/atomic" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -54,8 +53,8 @@ const ( vlogHeaderSize = 20 ) -var errStop = stderrors.New("Stop iteration") -var errTruncate = stderrors.New("Do truncate") +var errStop = errors.New("Stop iteration") +var errTruncate = errors.New("Do truncate") type logEntry func(e Entry, vp valuePointer) error @@ -165,7 +164,7 @@ func (vlog *valueLog) rewrite(f *logFile) error { for _, fid := range vlog.filesToBeDeleted { if fid == f.fid { vlog.filesLock.RUnlock() - return errors.Errorf("value log file already marked for deletion fid: %d", fid) + return fmt.Errorf("value log file already marked for deletion fid: %d", fid) } } maxFid := vlog.maxFid @@ -194,7 +193,7 @@ func (vlog *valueLog) rewrite(f *logFile) error { // Value is still present in value log. if len(vs.Value) == 0 { - return errors.Errorf("Empty value: %+v", vs) + return fmt.Errorf("Empty value: %+v", vs) } var vp valuePointer vp.Decode(vs.Value) @@ -334,7 +333,7 @@ func (vlog *valueLog) rewrite(f *logFile) error { // Just a sanity-check. 
if _, ok := vlog.filesMap[f.fid]; !ok { vlog.filesLock.Unlock() - return errors.Errorf("Unable to find fid: %d", f.fid) + return fmt.Errorf("Unable to find fid: %d", f.fid) } if vlog.iteratorCount() == 0 { delete(vlog.filesMap, f.fid) @@ -754,7 +753,7 @@ func (vlog *valueLog) validateWrites(reqs []*request) error { size := estimateRequestSize(req) estimatedVlogOffset := vlogOffset + size if estimatedVlogOffset > uint64(maxVlogFileSize) { - return errors.Errorf("Request size offset %d is bigger than maximum offset %d", + return fmt.Errorf("Request size offset %d is bigger than maximum offset %d", estimatedVlogOffset, maxVlogFileSize) } @@ -906,7 +905,7 @@ func (vlog *valueLog) getFileRLocked(vp valuePointer) (*logFile, error) { ret, ok := vlog.filesMap[vp.Fid] if !ok { // log file has gone away, we can't do anything. Return. - return nil, errors.Errorf("file with ID: %d not found", vp.Fid) + return nil, fmt.Errorf("file with ID: %d not found", vp.Fid) } // Check for valid offset if we are reading from writable log. @@ -916,7 +915,7 @@ func (vlog *valueLog) getFileRLocked(vp valuePointer) (*logFile, error) { if !vlog.opt.ReadOnly && vp.Fid == maxFid { currentOffset := vlog.woffset() if vp.Offset >= currentOffset { - return nil, errors.Errorf( + return nil, fmt.Errorf( "Invalid value pointer offset: %d greater than current offset: %d", vp.Offset, currentOffset) } @@ -961,7 +960,7 @@ func (vlog *valueLog) Read(vp valuePointer, _ *y.Slice) ([]byte, func(), error) } if uint32(len(kv)) < h.klen+h.vlen { vlog.db.opt.Errorf("Invalid read: vp: %+v", vp) - return nil, nil, errors.Errorf("Invalid read: Len: %d read at:[%d:%d]", + return nil, nil, fmt.Errorf("Invalid read: Len: %d read at:[%d:%d]", len(kv), h.klen, h.klen+h.vlen) } return kv[h.klen : h.klen+h.vlen], cb, nil diff --git a/y/error.go b/y/error.go index 239b95d46..fcd293c4e 100644 --- a/y/error.go +++ b/y/error.go @@ -18,10 +18,9 @@ package y // (3) You want to generate a new error with stack trace info. Use x.Errorf. import ( + "errors" "fmt" "log" - - "github.com/pkg/errors" ) var debugMode = false @@ -41,14 +40,14 @@ func Check2(_ interface{}, err error) { // AssertTrue asserts that b is true. Otherwise, it would log fatal. func AssertTrue(b bool) { if !b { - log.Fatalf("%+v", errors.Errorf("Assert failed")) + log.Fatalf("%+v", errors.New("Assert failed")) } } // AssertTruef is AssertTrue with extra info. func AssertTruef(b bool, format string, args ...interface{}) { if !b { - log.Fatalf("%+v", errors.Errorf(format, args...)) + log.Fatalf("%+v", fmt.Errorf(format, args...)) } } @@ -60,18 +59,12 @@ func Wrap(err error, msg string) error { } return fmt.Errorf("%s err: %+v", msg, err) } - return errors.Wrap(err, msg) + return fmt.Errorf("%s: %w", msg, err) } // Wrapf is Wrap with extra info. func Wrapf(err error, format string, args ...interface{}) error { - if !debugMode { - if err == nil { - return nil - } - return fmt.Errorf(format+" error: %+v", append(args, err)...) - } - return errors.Wrapf(err, format, args...) + return Wrap(err, fmt.Sprintf(format, args...)) } func CombineErrors(one, other error) error {
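The substitution applied throughout this patch follows one pattern: `errors.Errorf`/`errors.Wrapf` from `github.com/pkg/errors` become `fmt.Errorf` with the `%w` verb (or `errors.New` where no formatting is needed), and wrapped errors remain inspectable through the standard library's `errors.Is`/`errors.As`. The sketch below is illustrative only and is not part of the patch; the `openTable` helper and its call site are hypothetical, written to mirror the style of sentinel errors such as `errBadMagic` in manifest.go.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// A sentinel error, mirroring the style of errBadMagic in manifest.go.
var errBadMagic = errors.New("manifest has bad magic")

// openTable is a hypothetical helper, used only to demonstrate wrapping
// with fmt.Errorf and %w instead of errors.Wrapf.
func openTable(fname string) error {
	// Before: errors.Wrapf(errBadMagic, "while opening table: %s", fname)
	// After:  fmt.Errorf with %w keeps errBadMagic in the unwrap chain.
	return fmt.Errorf("while opening table: %s: %w", fname, errBadMagic)
}

func main() {
	err := openTable("000001.sst")

	// The standard library unwraps errors wrapped via %w.
	if errors.Is(err, errBadMagic) {
		fmt.Fprintln(os.Stderr, "bad magic:", err)
	}
}
```

One behavioral difference worth keeping in mind during review: unlike pkg/errors, `fmt.Errorf` does not capture a stack trace, so call sites that relied on `%+v` printing a trace now print only the message chain.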