@@ -20,4 +20,7 @@ object FlatMapShards {
val default = FlatMapShards(0)
}

+/**
+ * Setting this will force a random shuffle to the specified number of reducers.
+ */
case class FlatMapShards(count: Int)
@@ -20,4 +20,7 @@ object Reducers {
val default = Reducers(-1)
}

+/**
+ * This specifies the number of reducers to be used by the map-reduce framework.
+ */
case class Reducers(count: Int)
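
To make these two batch options concrete, here is a minimal sketch of how they might be set. The Options().set(...) builder is how other summingbird options are configured, but the exact import paths are assumptions, not taken from this diff:

// A sketch, not from this PR: assumes summingbird's Options().set(...) builder.
import com.twitter.summingbird.Options

val batchOpts = Options()
  .set(Reducers(400))      // run the map-reduce steps with 400 reducers; the default Reducers(-1) presumably defers to the platform
  .set(FlatMapShards(100)) // force a random shuffle of flatMap input across 100 shards; the default FlatMapShards(0) presumably disables it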
@@ -1,4 +1,5 @@
package com.twitter.summingbird.online.option

import com.twitter.util.Duration

case class OnlineSuccessHandler(handlerFn: Unit => Unit)
@@ -41,36 +42,38 @@ case class MaxWaitingFutures(get: Int)
*/
case class MaxFutureWaitTime(get: Duration)

-/*
-FlushFrequency is how often regardless of traffic a given Cache should be flushed to the network.
-*/
+/**
+ * FlushFrequency is how often, regardless of traffic, a given Cache should be flushed to the network.
+ */
case class FlushFrequency(get: Duration)

-/*
-UseAsyncCache is used to enable a background asynchronous cache. These do all cache related operations in background threads.
+/**
+ * UseAsyncCache is used to enable a background asynchronous cache. These do all cache-related operations in
+ * background threads.
*/
case class UseAsyncCache(get: Boolean)

-/*
-AsyncPoolSize controls the size of the fixed thread pool used to back an asynchronous cache.
-Only will have an effect if UseAsyncCache is true
-*/
+/**
+ * AsyncPoolSize controls the size of the fixed thread pool used to back an asynchronous cache.
+ * This only has an effect if UseAsyncCache is true.
+ */
case class AsyncPoolSize(get: Int)

-/*
-MaxEmitPerExecute controls the number of elements that can at once be emitted to the underlying platform.
-Must be careful this is >> than your fan out or more tuples could be generated than are emitted.
-*/
+/**
+ * MaxEmitPerExecute controls the number of elements that can be emitted to the underlying platform at once.
+ * Take care that this is much greater (>>) than your fan-out, or more tuples could be generated than are emitted.
+ */
case class MaxEmitPerExecute(get: Int)

-/*
-SoftMemoryFlushPercent is the percentage of memory used in the JVM at which a flush will be triggered of the cache.
-*/
+/**
+ * SoftMemoryFlushPercent is the percentage of JVM memory usage at which a flush of the cache will be triggered.
+ */
case class SoftMemoryFlushPercent(get: Float) {
require(0 < get && get <= 100.0, "must be a percentage.")
}

-/*
-ValueCombinerCacheSize is used in cache's that support it as a trigger to crush down a high locality of values without emitting
-*/
+/**
+ * ValueCombinerCacheSize is used in caches that support it as a trigger to crush down a high locality of
+ * values without emitting.
+ */
case class ValueCombinerCacheSize(get: Int)
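
The cache-tuning options above compose the same way. A hedged sketch, again assuming the Options().set(...) builder; Duration.fromSeconds is from com.twitter.util, which this file already imports:

// A sketch, not from this PR: plausible settings for the online cache options.
import com.twitter.summingbird.Options
import com.twitter.summingbird.online.option._
import com.twitter.util.Duration

val cacheOpts = Options()
  .set(FlushFrequency(Duration.fromSeconds(10))) // flush every 10s regardless of traffic
  .set(UseAsyncCache(true))                      // do cache work on background threads
  .set(AsyncPoolSize(4))                         // thread pool backing the async cache; ignored unless UseAsyncCache is true
  .set(SoftMemoryFlushPercent(80.0f))            // flush once JVM memory use passes 80%
  .set(MaxEmitPerExecute(1000))                  // cap elements emitted downstream in one execute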
@@ -33,7 +33,7 @@ case class SpoutParallelism(parHint: Int)
case class FlatMapParallelism(parHint: Int)

/**
-* This stupidity is necessary because val parameters can't be
+* This workaround is necessary because val parameters can't be
Collaborator: ORLY?

* call-by-name. We pass a function so that the metrics aren't
* serialized. Beyond the storm IMetric not being serializable,
* passing a value also causes problems with the instance registered
@@ -45,6 +45,10 @@ object FlatMapStormMetrics {
def unapply(metrics: FlatMapStormMetrics) = Some(metrics.metrics)
}

+/**
+ * When a bolt is prepared, these metrics will be used by being registered with the TopologyContext for
+ * the storm bolt.
+ */
class FlatMapStormMetrics(val metrics: () => TraversableOnce[StormMetric[IMetric]])
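
To show why metrics is a thunk rather than a value: the function is only invoked on the worker at prepare time, so the non-serializable IMetric instances never travel with the serialized topology. A sketch; CountMetric is storm's built-in counter in the backtype.storm-era API, and StormMetric's (metric, name, interval) field order is an assumption inferred from the SpoutStormMetrics mapping shown below:

// A sketch, not from this PR: the StormMetric field order is assumed.
import backtype.storm.metric.api.{ CountMetric, IMetric }
import com.twitter.util.Duration

val flatMapMetrics = new FlatMapStormMetrics(
  // the thunk defers construction to prepare time on the worker,
  // so no IMetric is ever serialized with the topology
  () => List(StormMetric[IMetric](new CountMetric, "flatMap-tuples-seen", Duration.fromSeconds(60)))
)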


@@ -58,6 +62,17 @@ class SpoutStormMetrics(val metrics: () => TraversableOnce[StormMetric[IMetric]]
{() => metrics().map{ x: StormMetric[IMetric] => Metric(x.name, x.metric, x.interval.inSeconds)}}
}

+/**
+ * This signals that the storm bolts should use localOrShuffleGrouping, which means that if the downstream bolt
+ * has a task on the same local worker, the output will only go to those tasks. Otherwise, shuffling
+ * happens normally. This is important to understand, as it can create hot spots in the topology.
+ */
case class PreferLocalDependency(get: Boolean)

+/**
+ * If this is set to true, a bolt will ack a tuple as soon as it is received and processing begins;
+ * otherwise, the tuple will be acked when the bolt completes. Acking signals to storm that a tuple has been
+ * fully processed, so if a tuple is acked on entry and a failure occurs later, it will not be replayed via
+ * storm's normal replay mechanisms.
+ */
case class AckOnEntry(get: Boolean)
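
Both flags are plain boolean options; a sketch of setting them, with the trade-offs from the comments above restated inline (again assuming the Options().set(...) builder):

// A sketch, not from this PR.
import com.twitter.summingbird.Options

val boltOpts = Options()
  .set(PreferLocalDependency(true)) // localOrShuffleGrouping: less network traffic, but can create hot spots
  .set(AckOnEntry(true))            // ack on receipt: lower latency, but failures mid-processing are not replayed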
@@ -47,5 +47,9 @@ object SummerStormMetrics {
}
class SummerStormMetrics(val metrics: () => TraversableOnce[StormMetric[_]])


-case class SummerBatchMultiplier(get: Int)
+/**
+ * This value is multiplied by the summer parallelism to set the true value used to hash and shard the
+ * key/value pairs. This allows more, smaller batches to be sent out to the number of threads
+ * set by SummerParallelism.
+ */
+case class SummerBatchMultiplier(get: Int)
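
A worked example of the multiplication described above; illustrative arithmetic only, since the actual hashing lives inside the summer implementation, and SummerParallelism is the companion option the comment refers to:

// Illustrative only: with SummerParallelism(5) and SummerBatchMultiplier(10),
// key/value pairs hash into 5 * 10 = 50 shards, i.e. many smaller batches
// spread across the 5 summer threads.
val summerParallelism = 5
val batchMultiplier   = 10
val shardCount        = summerParallelism * batchMultiplier // 50

def shardOf(key: String): Int =
  math.abs(key.hashCode % shardCount) // sketch of the sharding, not the real implementation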