Documentation ¶
Overview ¶
Package fun is a zero-dependency collection of tools and idioms that take advantage of generics: streams, error handling, a native-feeling Set type, and a simple pub-sub framework for distributing messages in fan-out patterns.
Index ¶
- Constants
- Variables
- func Convert[T any, O any](op fnx.Converter[T, O]) interface{ ... }
- func ConvertFn[T any, O any](op fn.Converter[T, O]) interface{ ... }
- type ChanOp
- func (op ChanOp[T]) Blocking() ChanOp[T]
- func (op ChanOp[T]) Cap() int
- func (op ChanOp[T]) Channel() chan T
- func (op ChanOp[T]) Close()
- func (op ChanOp[T]) Iterator(ctx context.Context) iter.Seq[T]
- func (op ChanOp[T]) Len() int
- func (op ChanOp[T]) NonBlocking() ChanOp[T]
- func (op ChanOp[T]) Receive() ChanReceive[T]
- func (op ChanOp[T]) Send() ChanSend[T]
- func (op ChanOp[T]) Stream() *Stream[T]
- type ChanReceive
- func (ro ChanReceive[T]) Check(ctx context.Context) (T, bool)
- func (ro ChanReceive[T]) Drop(ctx context.Context) bool
- func (ro ChanReceive[T]) Filter(ctx context.Context, filter func(T) bool) ChanReceive[T]
- func (ro ChanReceive[T]) Force(ctx context.Context) (out T)
- func (ro ChanReceive[T]) Ignore(ctx context.Context)
- func (ro ChanReceive[T]) Iterator(ctx context.Context) iter.Seq[T]
- func (ro ChanReceive[T]) Ok() bool
- func (ro ChanReceive[T]) Read(ctx context.Context) (T, error)
- func (ro ChanReceive[T]) ReadAll(op func(context.Context, T) error) fnx.Worker
- func (ro ChanReceive[T]) Stream() *Stream[T]
- type ChanSend
- func (sm ChanSend[T]) Check(ctx context.Context, it T) bool
- func (sm ChanSend[T]) Ignore(ctx context.Context, it T)
- func (sm ChanSend[T]) Signal(ctx context.Context)
- func (sm ChanSend[T]) Write(ctx context.Context, it T) (err error)
- func (sm ChanSend[T]) WriteAll(iter *Stream[T]) fnx.Worker
- func (sm ChanSend[T]) Zero(ctx context.Context) error
- type Constructors
- func (Constructors) Atoi() fnx.Converter[string, int]
- func (Constructors) ContextChannelWorker(ctx context.Context) fnx.Worker
- func (Constructors) ConvertErrorsToStrings() fnx.Converter[[]error, []string]
- func (Constructors) ConvertOperationToWorker() fnx.Converter[fnx.Operation, fnx.Worker]
- func (Constructors) ConvertWorkerToOperation(eh fn.Handler[error]) fnx.Converter[fnx.Worker, fnx.Operation]
- func (Constructors) Counter(maxVal int) *Stream[int]
- func (Constructors) ErrorChannelWorker(ch <-chan error) fnx.Worker
- func (Constructors) ErrorHandler(of fn.Handler[error]) fn.Handler[error]
- func (Constructors) ErrorHandlerWithAbort(cancel context.CancelFunc) fn.Handler[error]
- func (Constructors) ErrorHandlerWithoutCancelation(of fn.Handler[error]) fn.Handler[error]
- func (Constructors) ErrorHandlerWithoutTerminating(of fn.Handler[error]) fn.Handler[error]
- func (Constructors) ErrorStream(ec *erc.Collector) *Stream[error]
- func (Constructors) ErrorUnwindTransformer(filter erc.Filter) fnx.Converter[error, []error]
- func (Constructors) Itoa() fnx.Converter[int, string]
- func (Constructors) Lines(reader io.Reader) *Stream[string]
- func (Constructors) LinesWithSpaceTrimed(reader io.Reader) *Stream[string]
- func (Constructors) OperationHandler() fnx.Handler[fnx.Operation]
- func (Constructors) OperationPool(st *Stream[fnx.Operation]) fnx.Worker
- func (Constructors) Recover(ob fn.Handler[error])
- func (Constructors) RunAllOperations(st *Stream[fnx.Operation]) fnx.Worker
- func (Constructors) RunAllWorkers(st *Stream[fnx.Worker]) fnx.Worker
- func (Constructors) Signal() (func(), fnx.Worker)
- func (Constructors) Sprint(args ...any) fn.Future[string]
- func (Constructors) Sprintf(tmpl string, args ...any) fn.Future[string]
- func (Constructors) Sprintln(args ...any) fn.Future[string]
- func (Constructors) Str(args []any) fn.Future[string]
- func (Constructors) StrConcatinate(strs ...string) fn.Future[string]
- func (Constructors) StrJoin(args []any) fn.Future[string]
- func (Constructors) StrSliceConcatinate(input []string) fn.Future[string]
- func (Constructors) Strf(tmpl string, args []any) fn.Future[string]
- func (Constructors) Stringer(op fmt.Stringer) fn.Future[string]
- func (Constructors) StringsJoin(strs []string, sep string) fn.Future[string]
- func (Constructors) Strln(args []any) fn.Future[string]
- func (Constructors) WorkerHandler() fnx.Handler[fnx.Worker]
- func (Constructors) WorkerPool(st *Stream[fnx.Worker]) fnx.Worker
- type OptionProvider
- func JoinOptionProviders[T any](op ...OptionProvider[T]) OptionProvider[T]
- func WorkerGroupConfAddExcludeErrors(errs ...error) OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfContinueOnError() OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfContinueOnPanic() OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfDefaults() OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfIncludeContextErrors() OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfNumWorkers(num int) OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfSet(opt *WorkerGroupConf) OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfWithErrorCollector(ec *erc.Collector) OptionProvider[*WorkerGroupConf]
- func WorkerGroupConfWorkerPerCPU() OptionProvider[*WorkerGroupConf]
- type RuntimeInvariant
- func (RuntimeInvariant) Failure(args ...any)
- func (RuntimeInvariant) IsFalse(cond bool, args ...any)
- func (RuntimeInvariant) IsTrue(cond bool, args ...any)
- func (RuntimeInvariant) Must(err error, args ...any)
- func (RuntimeInvariant) New(args ...any) error
- func (RuntimeInvariant) Ok(cond bool, args ...any)
- type Stream
- func ChannelStream[T any](ch <-chan T) *Stream[T]
- func JoinStreams[T any](iters ...*Stream[T]) *Stream[T]
- func MakeStream[T any](gen func(context.Context) (T, error)) *Stream[T]
- func MergeStreams[T any](iters *Stream[*Stream[T]]) *Stream[T]
- func SeqStream[T any](it iter.Seq[T]) *Stream[T]
- func SliceStream[T any](in []T) *Stream[T]
- func VariadicStream[T any](in ...T) *Stream[T]
- func (st *Stream[T]) AddError(e error)
- func (st *Stream[T]) Buffer(n int) *Stream[T]
- func (st *Stream[T]) BufferParallel(n int) *Stream[T]
- func (st *Stream[T]) BufferedChannel(ctx context.Context, size int) <-chan T
- func (st *Stream[T]) Channel(ctx context.Context) <-chan T
- func (st *Stream[T]) Close() error
- func (st *Stream[T]) CloseHook() func(*Stream[T])
- func (st *Stream[T]) Count(ctx context.Context) (count int)
- func (st *Stream[T]) ErrorHandler() fn.Handler[error]
- func (st *Stream[T]) Filter(check func(T) bool) *Stream[T]
- func (st *Stream[T]) Iterator(ctx context.Context) iter.Seq[T]
- func (st *Stream[T]) Join(iters ...*Stream[T]) *Stream[T]
- func (st *Stream[T]) MarshalJSON() ([]byte, error)
- func (st *Stream[T]) Next(ctx context.Context) bool
- func (st *Stream[T]) Parallel(fn fnx.Handler[T], opts ...OptionProvider[*WorkerGroupConf]) fnx.Worker
- func (st *Stream[T]) Read(ctx context.Context) (out T, err error)
- func (st *Stream[T]) ReadAll(fn fnx.Handler[T]) fnx.Worker
- func (st *Stream[T]) Reduce(reducer func(T, T) (T, error)) *Stream[T]
- func (st *Stream[T]) Slice(ctx context.Context) (out []T, _ error)
- func (st *Stream[T]) Split(num int) []*Stream[T]
- func (st *Stream[T]) Transform(converter fnx.Converter[T, T]) *Stream[T]
- func (st *Stream[T]) UnmarshalJSON(in []byte) error
- func (st *Stream[T]) Value() T
- func (st *Stream[T]) WithHook(hook fn.Handler[*Stream[T]]) *Stream[T]
- type WorkerGroupConf
Constants ¶
const ErrInvariantViolation ers.Error = ers.ErrInvariantViolation
ErrInvariantViolation is the root of the error object contained in all panics produced by the Invariant helper.
const ErrNonBlockingChannelOperationSkipped ers.Error = ers.ErrCurrentOpSkip
ErrNonBlockingChannelOperationSkipped is returned by non-blocking channel operations when the channel was full (or empty) and the send or receive was therefore skipped.
const ErrRecoveredPanic ers.Error = ers.ErrRecoveredPanic
ErrRecoveredPanic is at the root of any error returned by a function in the fun package that recovers from a panic.
const ErrStreamContinue ers.Error = ers.ErrCurrentOpSkip
ErrStreamContinue instructs consumers of Streams and related processors that run groups of operations to skip the current item and continue processing. Equivalent to the "continue" keyword in other contexts.
Deprecated: use ers.ErrCurrentOpSkip instead.
Variables ¶
var Invariant = RuntimeInvariant{}
Invariant provides a namespace for making runtime invariant assertions. These all raise panics, passing error objects from panic, which can be more easily handled. These helpers are syntactic sugar around Invariant.Ok, and when an invariant is violated the resulting error is rooted in ErrInvariantViolation.
var MAKE = Constructors{}
MAKE provides namespaced access to the constructors provided by the Constructors type.
Functions ¶
func Convert ¶ added in v0.13.0
func Convert[T any, O any](op fnx.Converter[T, O]) interface { Stream(*Stream[T]) *Stream[O] Parallel(*Stream[T], ...OptionProvider[*WorkerGroupConf]) *Stream[O] }
Convert takes an fnx.Converter and returns an object whose Stream and Parallel methods take an input stream of one type and convert it to a stream of another type. All errors from the original stream are propagated to the output stream.
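A minimal usage sketch, assuming MAKE.Itoa (documented below) as the converter:

    ctx := context.Background()
    nums := fun.VariadicStream(1, 2, 3)
    strs := fun.Convert(fun.MAKE.Itoa()).Stream(nums)
    out, err := strs.Slice(ctx) // out should be []string{"1", "2", "3"} when err is nil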
Types ¶
type ChanOp ¶ added in v0.10.0
type ChanOp[T any] struct { // contains filtered or unexported fields }
ChanOp is a wrapper around a channel, to make it easier to write clear code that uses and handles basic operations with single channels. From a high level an operation might look like:
ch := make(chan string)
err := fun.Blocking(ch).Send().Write(ctx, "hello world")
Methods on ChanOp and related structures use value receivers (not pointer receivers), so ensure that output values are recorded as needed. It's also typically reasonable to avoid creating ChanOp objects in a loop.
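A slightly fuller sketch of the same pattern, assuming Blocking and NonBlocking accept a bidirectional channel and return a ChanOp, with error semantics as described for ChanSend and ChanReceive below:

    ctx := context.Background()
    ch := make(chan string, 1)

    // blocking send, then a non-blocking receive
    if err := fun.Blocking(ch).Send().Write(ctx, "hello world"); err != nil {
        // the context was canceled or the channel was closed
    }
    val, ok := fun.NonBlocking(ch).Receive().Check(ctx) // ok reports whether a value was read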
func Blocking ¶ added in v0.8.5
Blocking produces a ChanOp in blocking mode. All Send/Check/Ignore operations will block until the context is canceled, the channel is closed, or the send succeeds.
func Chan ¶ added in v0.10.4
Chan constructs a channel op, like "make(chan T)", with the optionally specified length. Operations on the channel (reads and writes) are blocking by default, but can be switched to non-blocking mode with NonBlocking().
func DefaultChan ¶ added in v0.10.4
DefaultChan takes a channel value and if it is non-nil, returns it; otherwise it constructs a new ChanOp of the specified type with the optionally provided length and returns it.
func NonBlocking ¶ added in v0.8.5
NonBlocking produces a ChanOp in non-blocking mode.
For non-blocking sends, the send operations will return ErrNonBlockingChannelOperationSkipped if the channel was full and the object was not sent.
func (ChanOp[T]) Blocking ¶ added in v0.10.4
Blocking returns a version of the ChanOp in blocking mode. This is not an atomic operation.
func (ChanOp[T]) Channel ¶ added in v0.10.0
func (op ChanOp[T]) Channel() chan T
Channel returns the underlying channel.
func (ChanOp[T]) Close ¶ added in v0.10.0
func (op ChanOp[T]) Close()
Close closes the underlying channel.
This swallows any panic encountered when calling close() on the underlying channel, which makes it safe to call on nil or already-closed channels: in all cases the channel is closed when Close() returns.
func (ChanOp[T]) Iterator ¶ added in v0.10.0
Iterator returns a standard Go library iterator over the contents of the channel.
func (ChanOp[T]) NonBlocking ¶ added in v0.10.4
NonBlocking returns a version of the ChanOp in non-blocking mode. This is not an atomic operation.
func (ChanOp[T]) Receive ¶ added in v0.10.0
func (op ChanOp[T]) Receive() ChanReceive[T]
Receive returns a ChanReceive object that acts on the same underlying sender.
type ChanReceive ¶ added in v0.10.0
type ChanReceive[T any] struct { // contains filtered or unexported fields }
ChanReceive wraps a channel for <-chan T operations. It is the type returned by the Receive() method on ChanOp. The primary method is Read(), with other methods provided as "self-documenting" helpers.
func BlockingReceive ¶ added in v0.10.0
func BlockingReceive[T any](ch <-chan T) ChanReceive[T]
BlockingReceive is the equivalent of Blocking(ch).Receive(), except that it accepts a receive-only channel.
func NonBlockingReceive ¶ added in v0.10.0
func NonBlockingReceive[T any](ch <-chan T) ChanReceive[T]
NonBlockingReceive is the equivalent of NonBlocking(ch).Receive(), except that it accepts a receive-only channel.
func (ChanReceive[T]) Check ¶ added in v0.10.0
func (ro ChanReceive[T]) Check(ctx context.Context) (T, bool)
Check performs the read operation and converts the error into an "ok" value, returning true if receive was successful and false otherwise.
func (ChanReceive[T]) Drop ¶ added in v0.10.0
func (ro ChanReceive[T]) Drop(ctx context.Context) bool
Drop performs a read operation and discards the result. Drop() returns false if the read would have returned an error, and true when an item was successfully read and discarded.
func (ChanReceive[T]) Filter ¶ added in v0.10.5
func (ro ChanReceive[T]) Filter(ctx context.Context, filter func(T) bool) ChanReceive[T]
Filter consumes the output of the channel and returns a NEW ChanReceive that only produces the elements for which the filter function returns true.
func (ChanReceive[T]) Force ¶ added in v0.10.0
func (ro ChanReceive[T]) Force(ctx context.Context) (out T)
Force ignores the error returning only the value from Read. This is either the value sent through the channel, or the zero value for T. Because zero values can be sent through channels, Force does not provide a way to distinguish between "channel-closed" and "received a zero value".
func (ChanReceive[T]) Ignore ¶ added in v0.10.0
func (ro ChanReceive[T]) Ignore(ctx context.Context)
Ignore reads one item from the channel and discards it.
func (ChanReceive[T]) Iterator ¶ added in v0.10.0
func (ro ChanReceive[T]) Iterator(ctx context.Context) iter.Seq[T]
Iterator provides access to the contents of the channel as a new-style standard library iterator. For ChanReceive objects in non-blocking mode, iteration ends when there are no items in the channel. In blocking mode, iteration ends when the context is canceled or the channel is closed.
func (ChanReceive[T]) Ok ¶ added in v0.10.0
func (ro ChanReceive[T]) Ok() bool
Ok attempts to read from the channel and returns true either when the channel is blocked or when an item is read from it, and false when the channel has been closed.
func (ChanReceive[T]) Read ¶ added in v0.10.0
func (ro ChanReceive[T]) Read(ctx context.Context) (T, error)
Read performs the read operation according to the blocking/non-blocking semantics of the receive operation.
In general errors are either: io.EOF if the channel is closed; a context cancellation error if the context passed to Read() is canceled; or ErrNonBlockingChannelOperationSkipped in the non-blocking case if the channel was empty.
In all cases when Read() returns an error, the return value is the zero value for T.
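A sketch of a typical read loop over these semantics (handleValue is a hypothetical consumer; ctx and ch are caller-provided):

    recv := fun.Blocking(ch).Receive()
    for {
        val, err := recv.Read(ctx)
        if err != nil {
            // io.EOF (channel closed) or a context cancellation error
            break
        }
        handleValue(val)
    }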
func (ChanReceive[T]) ReadAll ¶ added in v0.13.0
ReadAll returns a Worker function that processes data from the channel with the handler function. If the handler returns ers.ErrCurrentOpSkip, processing continues. All other handler errors (and problems reading from the channel) abort processing. io.EOF errors are not propagated to the caller.
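A sketch, assuming the returned fnx.Worker is invoked as a function that takes a context and returns an error:

    worker := fun.Blocking(ch).Receive().ReadAll(func(ctx context.Context, s string) error {
        fmt.Println(s)
        return nil // return ers.ErrCurrentOpSkip to skip an item; other errors abort
    })
    err := worker(ctx)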
func (ChanReceive[T]) Stream ¶ added in v0.12.0
func (ro ChanReceive[T]) Stream() *Stream[T]
Stream provides access to the contents of the channel as a fun-style stream. For ChanReceive objects in non-blocking mode, iteration ends when there are no items in the channel. In blocking mode, iteration ends when the context is canceled or the channel is closed.
type ChanSend ¶ added in v0.10.0
type ChanSend[T any] struct { // contains filtered or unexported fields }
ChanSend provides access to channel send operations, and is constructed by the Send() method on ChanOp. The primary method is Write(), with other methods provided for clarity.
func BlockingSend ¶ added in v0.10.0
BlockingSend is equivalent to Blocking(ch).Send() except that it accepts a send-only channel.
func NonBlockingSend ¶ added in v0.10.0
NonBlockingSend is equivalent to NonBlocking(ch).Send() except that it accepts a send-only channel.
func (ChanSend[T]) Check ¶ added in v0.10.0
Check performs a send and returns true when the send was successful and false otherwise.
func (ChanSend[T]) Signal ¶ added in v0.10.0
Signal attempts to send the zero value of T through the channel and returns when: the send succeeds, the channel is full and this is a non-blocking send, the context is canceled, or the channel is closed.
func (ChanSend[T]) Write ¶ added in v0.10.0
Write sends the item into the channel captured by Blocking/NonBlocking returning the appropriate error.
The returned error is nil if the send was successful, and io.EOF if the channel is closed (or nil), rather than a panic (as with the equivalent direct operation). The error value is a context cancellation error when the context is canceled, and for non-blocking sends, if the channel did not accept the write, ErrNonBlockingChannelOperationSkipped is returned.
type Constructors ¶ added in v0.12.0
type Constructors struct{}
The Constructors type serves to namespace constructors of common operations and specializations of generic functions provided by this package.
func (Constructors) Atoi ¶ added in v0.12.0
func (Constructors) Atoi() fnx.Converter[string, int]
Atoi produces a Transform function that converts strings into integers.
func (Constructors) ContextChannelWorker ¶ added in v0.13.0
func (Constructors) ContextChannelWorker(ctx context.Context) fnx.Worker
ContextChannelWorker creates a worker function that wraps a context and will--when called--block until the context is done, returning the context's cancellation error. Unless provided with a custom context that can be canceled but does not return an error (which would break many common assumptions regarding contexts,) this worker will always return an error.
func (Constructors) ConvertErrorsToStrings ¶ added in v0.12.0
func (Constructors) ConvertErrorsToStrings() fnx.Converter[[]error, []string]
ConvertErrorsToStrings makes a Converter function that translates slices of errors to slices of strings.
func (Constructors) ConvertOperationToWorker ¶ added in v0.13.0
ConvertOperationToWorker provides a converter function to produce Worker functions from Operation functions. The errors produced by the worker functions--if any--are the recovered panics from the inner operation.
func (Constructors) ConvertWorkerToOperation ¶ added in v0.13.0
func (Constructors) ConvertWorkerToOperation(eh fn.Handler[error]) fnx.Converter[fnx.Worker, fnx.Operation]
ConvertWorkerToOperation converts Worker functions to Operation functions, capturing their errors with the provided error handler.
func (Constructors) Counter ¶ added in v0.12.0
func (Constructors) Counter(maxVal int) *Stream[int]
Counter produces a stream that, starting at 1, yields monotonically increasing integers until the maximum is reached.
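For example (assuming Go 1.23+ range-over-func support for the Iterator method):

    ctx := context.Background()
    sum := 0
    for n := range fun.MAKE.Counter(5).Iterator(ctx) {
        sum += n // n takes the values 1 through 5
    }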
func (Constructors) ErrorChannelWorker ¶ added in v0.12.0
func (Constructors) ErrorChannelWorker(ch <-chan error) fnx.Worker
ErrorChannelWorker constructs a worker from an error channel. The resulting worker blocks until an error is produced in the error channel, the error channel is closed, or the worker's context is canceled. If the channel is closed, the worker will return a nil error, and if the context is canceled, the worker will return a context error. In all other cases the work will propagate the error (or nil) received from the channel.
You can call the resulting worker function more than once: if there are multiple errors produced or passed to the channel, they will be propagated; however, after the channel is closed subsequent calls to the worker function will return nil.
func (Constructors) ErrorHandler ¶ added in v0.12.0
ErrorHandler constructs an error observer that only calls the wrapped observer when the error passed is non-nil.
func (Constructors) ErrorHandlerWithAbort ¶ added in v0.12.0
func (Constructors) ErrorHandlerWithAbort(cancel context.CancelFunc) fn.Handler[error]
ErrorHandlerWithAbort creates a new error handler that--ignoring nil and context expiration errors--will call the provided context cancellation function when it receives an error.
Use the Chain and Join methods of handlers to further process the error.
func (Constructors) ErrorHandlerWithoutCancelation ¶ added in v0.12.0
ErrorHandlerWithoutCancelation wraps and returns an error handler that filters all nil errors and errors that are rooted in context Cancellation from the wrapped Handler.
func (Constructors) ErrorHandlerWithoutTerminating ¶ added in v0.12.0
ErrorHandlerWithoutTerminating wraps an error observer and only calls the underlying observer if the input error is non-nil and is not one of the "terminating" errors used by this package (e.g. io.EOF and similar errors). Context cancellation errors can and should be filtered separately.
func (Constructors) ErrorStream ¶ added in v0.12.0
func (Constructors) ErrorStream(ec *erc.Collector) *Stream[error]
ErrorStream provides a stream that provides access to the error collector.
func (Constructors) ErrorUnwindTransformer ¶ added in v0.12.0
ErrorUnwindTransformer provides the ers.Unwind operation as a transform method, which consumes an error and produces a slice of its component errors. All errors are processed by the provided filter, and the transformer's context is not used. The error value of the Transform function is always nil.
func (Constructors) Itoa ¶ added in v0.12.0
func (Constructors) Itoa() fnx.Converter[int, string]
Itoa produces a Transform function that converts integers into strings.
func (Constructors) Lines ¶ added in v0.12.0
func (Constructors) Lines(reader io.Reader) *Stream[string]
Lines provides a fun.Stream over the contents of a (presumably plaintext) io.Reader, using bufio.Scanner.
func (Constructors) LinesWithSpaceTrimed ¶ added in v0.12.0
func (Constructors) LinesWithSpaceTrimed(reader io.Reader) *Stream[string]
LinesWithSpaceTrimed provides a stream with access to the line-separated content of an io.Reader, like Lines(), but with leading and trailing whitespace trimmed from each line.
func (Constructors) OperationHandler ¶ added in v0.13.0
func (Constructors) OperationHandler() fnx.Handler[fnx.Operation]
OperationHandler constructs a Handler function for running Operation functions. Use with streams to build worker pools.
The Constructors type serves to namespace these constructors, for interface clarity purposes. Use the MAKE variable to access this method as in:
fun.MAKE.OperationHandler()
func (Constructors) OperationPool ¶ added in v0.12.0
OperationPool returns a fnx.Worker that, when called, processes the incoming stream of fnx.Operations, starts a go routine for running each element in the stream (without any throttling or rate limiting), and then blocks until all operations have returned, or the context passed to the output function has been canceled.
For more configurable options, use the itertool.Worker() function, which provides more configurability and supports both fnx.Operation and fnx.Worker functions.
func (Constructors) Recover ¶ added in v0.12.0
func (Constructors) Recover(ob fn.Handler[error])
Recover catches a panic, turns it into an error and passes it to the provided observer function.
func (Constructors) RunAllOperations ¶ added in v0.13.0
RunAllOperations returns a worker function that will run all the fnx.Operation functions in the stream serially.
func (Constructors) RunAllWorkers ¶ added in v0.13.0
RunAllWorkers returns a Worker function that will run all of the Worker functions in the stream serially.
func (Constructors) Signal ¶ added in v0.12.0
func (Constructors) Signal() (func(), fnx.Worker)
Signal is a wrapper around the common pattern where signal channels are closed to pass termination and blocking notifications between go routines. The constructor returns two functions: a closer operation--func()--and a Worker that waits for the closer to be triggered.
The closer is safe to call multiple times. The worker ALWAYS returns the context cancellation error if it's been canceled, even if the signal channel was closed.
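A sketch of the common usage (doBackgroundWork is hypothetical, and the worker is assumed to be callable as wait(ctx), returning an error):

    closer, wait := fun.MAKE.Signal()
    go func() { defer closer(); doBackgroundWork() }()
    err := wait(ctx) // returns the context's error if ctx is canceled before closer is called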
func (Constructors) Sprint ¶ added in v0.12.0
func (Constructors) Sprint(args ...any) fn.Future[string]
Sprint constructs a future that calls fmt.Sprint over the given variadic arguments.
func (Constructors) Sprintf ¶ added in v0.12.0
Sprintf produces a future that calls and returns fmt.Sprintf for the provided arguments when the future is called.
func (Constructors) Sprintln ¶ added in v0.12.0
func (Constructors) Sprintln(args ...any) fn.Future[string]
Sprintln constructs a future that calls fmt.Sprintln over the given variadic arguments.
func (Constructors) Str ¶ added in v0.12.0
func (Constructors) Str(args []any) fn.Future[string]
Str provides a future that calls fmt.Sprint over a slice of any objects. Use fun.MAKE.Sprint for a variadic alternative.
func (Constructors) StrConcatinate ¶ added in v0.12.0
func (Constructors) StrConcatinate(strs ...string) fn.Future[string]
StrConcatinate produces a future that joins a variadic sequence of strings into a single string.
func (Constructors) StrJoin ¶ added in v0.12.0
func (Constructors) StrJoin(args []any) fn.Future[string]
StrJoin, like Strln and Sprintln, creates a concatenated string representation of a sequence of values; however, StrJoin omits the final newline character that Sprintln adds. This is similar in functionality to MAKE.Sprint() or MAKE.Str(), but ALWAYS adds a space between elements.
func (Constructors) StrSliceConcatinate ¶ added in v0.12.0
func (Constructors) StrSliceConcatinate(input []string) fn.Future[string]
StrSliceConcatinate produces a future, backed by strings.Join, that concatenates the elements in the input slice into a single string.
func (Constructors) Strf ¶ added in v0.12.0
Strf produces a future that calls fmt.Sprintf for the given template string and arguments.
func (Constructors) Stringer ¶ added in v0.12.0
Stringer converts a fmt.Stringer object's String() method into a string future.
func (Constructors) StringsJoin ¶ added in v0.12.0
StringsJoin produces a future that combines a slice of strings into a single string, joined with the separator.
func (Constructors) Strln ¶ added in v0.12.0
func (Constructors) Strln(args []any) fn.Future[string]
Strln constructs a future that calls fmt.Sprintln for the given arguments.
func (Constructors) WorkerHandler ¶ added in v0.13.0
func (Constructors) WorkerHandler() fnx.Handler[fnx.Worker]
WorkerHandler constructs a Handler function for running Worker functions. Use with streams to build worker pools.
The Constructors type serves to namespace these constructors, for interface clarity purposes. Use the MAKE variable to access this method as in:
fun.MAKE.WorkerHandler()
The WorkerHandler provides no panic protection.
func (Constructors) WorkerPool ¶ added in v0.12.0
WorkerPool creates a worker that processes a stream of worker functions, for simple and short total-duration operations. Every worker in the pool runs in its own go routine, and there are no limits or throttling on the number of go routines. All errors are aggregated in a single collector (erc.Stack), which is returned by the worker when the operation ends (if many Workers error this may create memory pressure), and there's no special handling of panics.
For more configurable options, use the itertool.Worker() function, which provides more configurability and supports both fnx.Operation and Worker functions.
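A sketch, assuming fnx.Worker values are plain context-accepting functions that return errors (stepOne and stepTwo are hypothetical):

    jobs := fun.VariadicStream[fnx.Worker](
        func(ctx context.Context) error { return stepOne(ctx) },
        func(ctx context.Context) error { return stepTwo(ctx) },
    )
    err := fun.MAKE.WorkerPool(jobs)(ctx) // aggregated errors from both workers, if any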
type OptionProvider ¶ added in v0.10.0
OptionProvider is a function type for building functional arguments, and is used for the parallel stream processing (map, transform, for-each, etc.) in the fun and itertool packages, and available with tooling for use in other contexts.
The type T should always be mutable (e.g. a map, or a pointer).
func JoinOptionProviders ¶ added in v0.10.0
func JoinOptionProviders[T any](op ...OptionProvider[T]) OptionProvider[T]
JoinOptionProviders takes zero or more option providers and produces a single combined option provider. With zero or nil arguments, the operation becomes a noop.
func WorkerGroupConfAddExcludeErrors ¶ added in v0.10.0
func WorkerGroupConfAddExcludeErrors(errs ...error) OptionProvider[*WorkerGroupConf]
WorkerGroupConfAddExcludeErrors appends the provided errors to the ExcludedErrors value. The provider will return an error if any of the input errors is ErrRecoveredPanic.
func WorkerGroupConfContinueOnError ¶ added in v0.10.0
func WorkerGroupConfContinueOnError() OptionProvider[*WorkerGroupConf]
WorkerGroupConfContinueOnError toggles the option that allows the operation to continue when a worker encounters an error. Otherwise, any error will lead to an abort.
func WorkerGroupConfContinueOnPanic ¶ added in v0.10.0
func WorkerGroupConfContinueOnPanic() OptionProvider[*WorkerGroupConf]
WorkerGroupConfContinueOnPanic toggles the option that allows the operation to continue when encountering a panic.
func WorkerGroupConfDefaults ¶ added in v0.13.0
func WorkerGroupConfDefaults() OptionProvider[*WorkerGroupConf]
WorkerGroupConfDefaults sets the "continue-on-error" option and the "number-of-workers-equals-numcpus" option.
func WorkerGroupConfIncludeContextErrors ¶ added in v0.10.0
func WorkerGroupConfIncludeContextErrors() OptionProvider[*WorkerGroupConf]
WorkerGroupConfIncludeContextErrors toggles the option that forces the operation to include context errors in the output. By default they are not included.
func WorkerGroupConfNumWorkers ¶ added in v0.10.0
func WorkerGroupConfNumWorkers(num int) OptionProvider[*WorkerGroupConf]
WorkerGroupConfNumWorkers sets the number of workers configured. It is not possible to set this value to less than 1: negative values and 0 are always ignored.
func WorkerGroupConfSet ¶ added in v0.10.0
func WorkerGroupConfSet(opt *WorkerGroupConf) OptionProvider[*WorkerGroupConf]
WorkerGroupConfSet overrides the option with the provided option.
func WorkerGroupConfWithErrorCollector ¶ added in v0.10.0
func WorkerGroupConfWithErrorCollector(ec *erc.Collector) OptionProvider[*WorkerGroupConf]
WorkerGroupConfWithErrorCollector sets an error collector implementation for later use in the WorkerGroupOptions. The resulting function will only error if the collector is nil, however, this method will override an existing error collector.
The ErrorCollector interface is typically provided by the `erc.Collector` type.
ErrorCollectors are used by some operations to collect, aggregate, and distribute errors from operations to the caller.
func WorkerGroupConfWorkerPerCPU ¶ added in v0.10.0
func WorkerGroupConfWorkerPerCPU() OptionProvider[*WorkerGroupConf]
WorkerGroupConfWorkerPerCPU sets the number of workers to the number of detected CPUs by the runtime (e.g. runtime.NumCPU()).
func (OptionProvider[T]) Apply ¶ added in v0.10.0
func (op OptionProvider[T]) Apply(in T) error
Apply applies the current Option Provider to the configuration, and if the type T implements a Validate() method, calls that. All errors are aggregated.
func (OptionProvider[T]) Build ¶ added in v0.10.3
func (op OptionProvider[T]) Build(conf T) (out T, err error)
Build processes a configuration object, returning a modified version (or a zero value, in the case of an error).
func (OptionProvider[T]) Join ¶ added in v0.10.0
func (op OptionProvider[T]) Join(opps ...OptionProvider[T]) OptionProvider[T]
Join aggregates a collection of Option Providers into a single option provider. The amalgamated operation is panic safe and omits all nil providers.
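A minimal sketch of building a configuration from several providers:

    conf, err := fun.JoinOptionProviders(
        fun.WorkerGroupConfWorkerPerCPU(),
        fun.WorkerGroupConfContinueOnError(),
    ).Build(&fun.WorkerGroupConf{})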
type RuntimeInvariant ¶ added in v0.10.0
type RuntimeInvariant struct{}
RuntimeInvariant is a type defined to create a namespace, callable (typically) via the Invariant symbol. Access these functions as in:
fun.Invariant.IsTrue(len(slice) > 0, "slice must have elements", len(slice))
func (RuntimeInvariant) Failure ¶ added in v0.10.1
func (RuntimeInvariant) Failure(args ...any)
Failure unconditionally raises an invariant failure error and processes the arguments as with the other invariant failures: extracting errors and aggregating constituent errors.
func (RuntimeInvariant) IsFalse ¶ added in v0.10.0
func (RuntimeInvariant) IsFalse(cond bool, args ...any)
IsFalse provides a runtime assertion that the condition is false; when it is not, IsFalse panics with an error rooted in ErrInvariantViolation, annotated with the provided arguments. In all other cases the operation is a noop.
func (RuntimeInvariant) IsTrue ¶ added in v0.10.0
func (RuntimeInvariant) IsTrue(cond bool, args ...any)
IsTrue provides a runtime assertion that the condition is true; when it is not, IsTrue panics with an error rooted in ErrInvariantViolation, annotated with the provided arguments. In all other cases the operation is a noop.
func (RuntimeInvariant) Must ¶ added in v0.10.0
func (RuntimeInvariant) Must(err error, args ...any)
Must raises an invariant error if the error is not nil. The content of the panic is both--via wrapping--an ErrInvariantViolation and the error itself.
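For example (data and cfg are hypothetical):

    // panics with an error rooted in ErrInvariantViolation if unmarshaling fails
    fun.Invariant.Must(json.Unmarshal(data, &cfg), "config must parse")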
func (RuntimeInvariant) New ¶ added in v0.12.0
func (RuntimeInvariant) New(args ...any) error
New creates an error that is rooted in ers.ErrInvariantViolation, aggregating errors and annotating the error.
func (RuntimeInvariant) Ok ¶ added in v0.10.9
func (RuntimeInvariant) Ok(cond bool, args ...any)
Ok panics if the condition is false, passing an error that is rooted in ErrInvariantViolation. Otherwise the operation is a noop.
type Stream ¶ added in v0.12.0
type Stream[T any] struct { // contains filtered or unexported fields }
Stream provides a safe, context-respecting iteration/sequence paradigm, and an entire toolkit for consumer functions, converters, and generation options.
As the basis and heart of a programming model, streams make it possible to think about groups or sequences of objects or work, that can be constructed and populated lazily, and provide a collection of interfaces for processing and manipulating data.
Beyond the stream interactive tools provided in this package, the itertool package provides some additional helpers and tools, while the adt and dt packages provide simple types and tooling built around these streams.
The canonical way to use a stream is with the core Next(), Value(), and Close() methods: Next takes a context and advances the stream. Next, which is typically called in a single-clause for loop (i.e. as a while loop), returns false when the stream has no more items, after which the stream should be closed and cannot be restarted. When Next() returns true, the stream has advanced and Value() returns the value at the current position in the stream. If the stream has not been closed, Next() will block while the operation that produces or generates new items for the stream blocks (or continues iterating), until the stream is exhausted or closed.
However, additional methods, such as ReadOne and the Future() function (which is a wrapper around ReadOne), provide a different interaction paradigm: they combine the Next() and Value() operations into a single function call. When the stream is exhausted these methods return the io.EOF error.
In all cases, checking the Close() value of the stream makes it possible to see any errors encountered during the operation of the stream.
Next/Value cannot be used concurrently, as there is no way to synchronize the Next and Value calls with respect to each other: in that mode it's possible to both miss and/or receive duplicate values from the stream. If the future function underlying the stream is safe for concurrent use, then ReadOne can be used safely. As a rule, all tooling in the fun package uses ReadOne, except in a few cases where a caller has exclusive access to the stream.
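A minimal sketch of the canonical Next/Value/Close pattern described above:

    ctx := context.Background()
    st := fun.SliceStream([]int{1, 2, 3})
    for st.Next(ctx) {
        fmt.Println(st.Value())
    }
    if err := st.Close(); err != nil {
        // errors collected during iteration
    }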
func ChannelStream ¶ added in v0.12.0
ChannelStream exposes access to an existing "receive" channel as a stream.
func JoinStreams ¶ added in v0.12.0
JoinStreams takes a sequence of streams and produces a combined stream. JoinStreams processes items sequentially from each stream. By contrast, MergeStreams constructs a stream that reads all of the items from the input streams in parallel, and returns items in an arbitrary order.
Use JoinStreams or FlattenStreams if order is important. Use FlattenStream for larger numbers of streams. Use MergeStreams when producing an item takes a non-trivial amount of time.
func MakeStream ¶ added in v0.12.0
MakeStream constructs a stream that calls the Future function once for every item, until it errors. Errors other than context cancellation errors and io.EOF are propagated to the stream's Close method.
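For example, a generator that ends the stream by returning io.EOF:

    count := 0
    st := fun.MakeStream(func(ctx context.Context) (int, error) {
        count++
        if count > 3 {
            return 0, io.EOF // ends iteration cleanly
        }
        return count, nil
    })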
func MergeStreams ¶ added in v0.12.0
MergeStreams takes a collection of streams of the same type of objects and provides a single stream over these items.
There are a collection of background threads, one for each input stream, which will iterate over the inputs and will provide the items to the output stream. These threads start on the first iteration and will return if this context is canceled.
The stream will continue to produce items until all input streams have been consumed, the initial context is canceled, or the Close method is called, or all of the input streams have returned an error.
Use MergeStreams when producing an item takes a non-trivial amount of time. Use ChainStreams or FlattenStreams if order is important. Use FlattenStream for larger numbers of streams.
func SliceStream ¶ added in v0.12.0
SliceStream provides Stream access to the elements in a slice.
func VariadicStream ¶ added in v0.12.0
VariadicStream produces a stream from an arbitrary collection of objects, passed into the constructor.
func (*Stream[T]) AddError ¶ added in v0.12.0
AddError can be used by calling code to add errors to the stream, which are merged.
AddError is not safe for concurrent use (with regards to other AddError calls or Close).
func (*Stream[T]) Buffer ¶ added in v0.12.0
Buffer adds a channel-backed buffer to smooth out iteration performance. If the iteration function (future) and the consumer both take time, even a small buffer will improve the throughput of the system and prevent both components from blocking on each other.
The ordering of elements in the output stream is the same as the order of elements in the input stream.
func (*Stream[T]) BufferParallel ¶ added in v0.12.0
BufferParallel processes the input queue and stores those items in a channel (like Buffer); however, unlike Buffer, multiple workers consume the input stream: as a result the order of the elements in the output stream is not the same as the input order.
Otherwise, the two Buffer methods are equivalent and serve the same purpose: process the items from a stream without blocking the consumer of the stream.
func (*Stream[T]) BufferedChannel ¶ added in v0.12.0
BufferedChannel provides access to the content of the stream with a buffered channel that is closed when the stream is exhausted.
func (*Stream[T]) Channel ¶ added in v0.12.0
Channel provides access to the contents of the stream as a channel. The channel is closed when the stream is exhausted.
func (*Stream[T]) Close ¶ added in v0.12.0
Close terminates the stream and returns any errors collected during iteration. If the stream allocates resources, this will typically release them, but Close may not block until all resources are released.
Close is safe to call more than once and always resolves the error handler (e.g. AddError).
func (*Stream[T]) CloseHook ¶ added in v0.12.0
CloseHook returns a function that can be passed to the WithHook() method on a _new_ stream that wraps this stream, so that the other stream will call the inner stream's close method and include the inner stream's errors.
func (*Stream[T]) Count ¶ added in v0.12.0
Count returns the number of items observed by the stream. Callers should still manually call Close on the stream.
func (*Stream[T]) ErrorHandler ¶ added in v0.12.0
ErrorHandler provides access to the AddError method as an error observer.
func (*Stream[T]) Filter ¶ added in v0.12.0
Filter passes every item in the stream through the check function and propagates to the output stream only the items for which the check returns true. There is no buffering, and check functions should return quickly. For more advanced use, consider using itertool.Map().
func (*Stream[T]) Iterator ¶ added in v0.12.0
Iterator converts a fun.Stream[T] into a native go iterator.
func (*Stream[T]) Join ¶ added in v0.12.0
Join merges multiple streams processing and producing their results sequentially, and without starting any go routines. Otherwise similar to Flatten (which processes each stream in parallel).
func (*Stream[T]) MarshalJSON ¶ added in v0.12.0
MarshalJSON is useful for implementing json.Marshaler methods from stream-supporting types, wrapping the standard library's JSON encoding tools.
The contents of the stream are marshaled as elements in a JSON array.
func (*Stream[T]) Next ¶ added in v0.12.0
Next advances the stream (using ReadOne) and caches the current value for access with the Value() method. When Next returns true, Value() will return the next item. When false, either the stream has been exhausted (e.g. the Future function has returned io.EOF) or the context passed to Next has been canceled.
Using Next/Value cannot be done safely if stream is accessed from multiple go routines concurrently. In these cases use ReadOne directly, or use Split to create a stream that safely draws items from the parent stream.
func (*Stream[T]) Parallel ¶ added in v0.12.0
func (st *Stream[T]) Parallel(fn fnx.Handler[T], opts ...OptionProvider[*WorkerGroupConf]) fnx.Worker
Parallel produces a worker that, when executed, iteratively processes the contents of the stream. The options control the error handling and parallelism semantics of the operation.
This is the workhorse operation of the package, and can be used as the basis of worker pools, event processing, or message dispatching for pubsub queues and related systems.
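A sketch, assuming fnx.Handler[T] is a context-accepting per-item function returning an error, and that the returned fnx.Worker is callable as worker(ctx) (st is a *Stream[string] and processItem is hypothetical):

    worker := st.Parallel(
        func(ctx context.Context, item string) error { return processItem(ctx, item) },
        fun.WorkerGroupConfNumWorkers(4),
        fun.WorkerGroupConfContinueOnError(),
    )
    err := worker(ctx)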
func (*Stream[T]) Read ¶ added in v0.12.0
Read returns a single value from the stream. This operation IS safe for concurrent use.
Read returns the io.EOF error when the stream has been exhausted, a context expiration error or the underlying error produced by the stream. All errors produced by Read are terminal and indicate that no further iteration is possible.
func (*Stream[T]) ReadAll ¶ added in v0.12.0
ReadAll provides a worker that consumes all items in the stream with the provided processor function.
All panics are converted to errors and propagated in the response of the worker, and abort the processing. If the handler function returns ers.ErrCurrentOpSkip, processing continues. All other errors abort processing and are returned by the worker.
func (*Stream[T]) Reduce ¶ added in v0.12.0
Reduce processes a stream with a reducer function. The reduction runs synchronously and lazily: no processing happens before the output stream is read. If the reducer function returns ers.ErrCurrentOpSkip, the output value is ignored, and the reducer operation continues.
If the underlying stream returns an error, it's returned by the Close method of the new stream. If the reducer function returns an error, that error is returned either by the Read or Close methods of the stream. If the underlying stream terminates cleanly, then the reducer will return its last value without error. Otherwise any error returned by the reducer, other than ers.ErrCurrentOpSkip, is propagated to the caller.
The "previous" value for the first reduce option is the zero value for the type T.
func (*Stream[T]) Slice ¶ added in v0.12.0
Slice converts a stream to a slice of its values, and closes the stream when it has been exhausted.
In the case of an error in the underlying stream the output slice will have the values encountered before the error.
func (*Stream[T]) Split ¶ added in v0.12.0
Split produces an arbitrary number of streams which divide the input. The division is lazy and depends on the rate of consumption of output streams, but every item from the input stream is sent to exactly one output stream, each of which can be safely used from a different go routine.
The input stream is not closed after the output streams are exhausted. There is one background go routine that reads items off of the input stream, which starts when the first output stream is advanced: be aware that canceling this context will effectively cancel all streams.
func (*Stream[T]) Transform ¶ added in v0.12.0
Transform passes each item in a stream through a converter to produce a new stream with transformed items.
func (*Stream[T]) UnmarshalJSON ¶ added in v0.12.0
UnmarshalJSON reads a byte-array of input data that contains a JSON array and then processes and returns that data iteratively.
To handle streaming data from an io.Reader that contains a stream of line-separated json documents, use itertool.JSON.
func (*Stream[T]) Value ¶ added in v0.12.0
func (st *Stream[T]) Value() T
Value returns the object at the current position in the stream. It's often used with Next() for looping over the stream.
Value and Next cannot be used safely when the stream is being used concurrently. Use ReadOne or the Future method.
type WorkerGroupConf ¶ added in v0.10.0
type WorkerGroupConf struct {
// NumWorkers describes the number of parallel workers
// processing the incoming stream items and running the map
// function. All values less than 1 are converted to 1. Any
// value greater than 1 will result in out-of-sequence results
// in the output stream.
NumWorkers int
// ContinueOnPanic prevents the operations from halting when a
// single processing function panics. In all modes panics
// are converted to errors and propagated to the output
// stream's Close() method.
ContinueOnPanic bool
// ContinueOnError allows a processing function to return an
// error and allow the work of the broader operation to
// continue. Errors are aggregated and propagated to the output
// stream's Close() method.
ContinueOnError bool
// IncludeContextExpirationErrors changes the default handling
// of context cancellation errors. By default all errors
// rooted in context cancellation are not propagated to the
// Close() method, however, when true, these errors are
// captured. All other error handling semantics
// (e.g. ContinueOnError) are applicable.
IncludeContextExpirationErrors bool
// ExcludedErrors is a list of errors that should not be included
// in the collected errors of the
// output. ers.ErrRecoveredPanic is always included and io.EOF
// is never included.
ExcludedErrors []error
// ErrorCollector provides a way to connect an existing error
// collector to a worker group.
ErrorCollector *erc.Collector
}
WorkerGroupConf describes the runtime options to several operations. The zero value of this struct provides a usable, strict configuration.
func (*WorkerGroupConf) CanContinueOnError ¶ added in v0.10.0
func (o *WorkerGroupConf) CanContinueOnError(err error) (out bool)
CanContinueOnError checks an error, collecting it as needed using the WorkerGroupConf, and then returning true if processing should continue and false otherwise.
Neither io.EOF nor ers.ErrCurrentOpSkip errors are ever observed. All panic errors are observed. Context cancellation errors are observed only when configured.
func (*WorkerGroupConf) Validate ¶ added in v0.10.0
func (o *WorkerGroupConf) Validate() error
Validate ensures that the configuration is valid, and returns an error if there are impossible configurations.
Source Files ¶
Directories ¶
| Path | Synopsis |
|---|---|
| adt | Package adt provides "atomic data types" as strongly-typed generic helpers for simple atomic operations (including sync.Map, sync.Pool, and a typed value). |
| assert | Package assert provides an incredibly simple assertion framework, that relies on generics and simplicity. |
| dt | Package dt provides container type implementations and interfaces. |
| cmp | Package cmp provides comparators for sorting linked lists. |
| hdrhist | Package hdrhist provides an implementation of Gil Tene's HDR Histogram data structure. |
| is | Package is contains a simple assertion library for the fun/ensure testing framework. |
| erc | Package erc provides a simple/fast error aggregation tool for collecting and aggregating errors. |
| ers | Package ers provides some very basic error aggregating and handling tools, as a companion to erc. |
| ft | Package ft provides high-level function tools for manipulating common function objects and types. |
| intish | Package intish provides a collection of strongly-typed integer arithmetic operations, to make it possible to avoid floating point math for simple operations when desired. |
| itertool | Package itertool provides a set of functional helpers for managing and using fun.Streams, including parallel processing, futures, Map/Reduce, Merge, and other convenient tools. |
| pubsub | Package pubsub provides a message broker for one-to-many or many-to-many message distribution. |
| risky | Package risky contains a bunch of bad ideas for APIs and operations that will definitely lead to panics and deadlocks and incorrect behavior when used incorrectly. |
| srv | Package srv provides a framework and toolkit for service orchestration. |
| testt | Package testt (for test tools) provides a couple of useful helpers for common test patterns. |