Thanks to visit codestin.com
Credit goes to github.com

Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Updates from review
  • Loading branch information
dpkp committed Dec 21, 2017
commit 21484ca84c2c24ea9f29bb2b5ae705d4f598db9f
3 changes: 1 addition & 2 deletions kafka/client_async.py
Original file line number Diff line number Diff line change
Expand Up @@ -735,14 +735,13 @@ def add_topic(self, topic):
self._topics.add(topic)
return self.cluster.request_update()

# request metadata update on disconnect and timeout
# This method should be locked when running multi-threaded
def _maybe_refresh_metadata(self):
"""Send a metadata request if needed.

Returns:
int: milliseconds until next refresh
"""
# This should be locked when running multi-threaded
ttl = self.cluster.ttl()
wait_for_in_progress_ms = self.config['request_timeout_ms'] if self._metadata_refresh_in_progress else 0
metadata_timeout = max(ttl, wait_for_in_progress_ms)
Expand Down
2 changes: 1 addition & 1 deletion kafka/conn.py
Original file line number Diff line number Diff line change
Expand Up @@ -685,7 +685,7 @@ def can_send_more(self):
def recv(self):
"""Non-blocking network receive.

Return list of (response, future)
Return list of (response, future) tuples
"""
if not self.connected() and not self.state is ConnectionStates.AUTHENTICATING:
log.warning('%s cannot recv: socket not connected', self)
Expand Down
8 changes: 4 additions & 4 deletions kafka/coordinator/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -292,10 +292,10 @@ def poll_heartbeat(self):
"""
Check the status of the heartbeat thread (if it is active) and indicate
the liveness of the client. This must be called periodically after
joining with :meth:`.ensureActiveGroup` to ensure that the member stays
joining with :meth:`.ensure_active_group` to ensure that the member stays
in the group. If an interval of time longer than the provided rebalance
timeout expires without calling this method, then the client will
proactively leave the group.
timeout (max_poll_interval_ms) expires without calling this method, then
the client will proactively leave the group.

Raises: RuntimeError for unexpected errors raised from the heartbeat thread
"""
Expand Down Expand Up @@ -330,7 +330,7 @@ def _handle_join_success(self, member_assignment_bytes):
self._generation.protocol,
member_assignment_bytes)

def _handle_join_failure(self, exception):
def _handle_join_failure(self, _):
with self._lock:
self.join_future = None
self.state = MemberState.UNJOINED
Expand Down
17 changes: 5 additions & 12 deletions kafka/coordinator/consumer.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ def poll(self):
# track of the fact that we need to rebalance again to reflect the
# change to the topic subscription. Without ensuring that the
# metadata is fresh, any metadata update that changes the topic
# subscriptions and arrives with a rebalance in progress will
# subscriptions and arrives while a rebalance is in progress will
# essentially be ignored. See KAFKA-3949 for the complete
# description of the problem.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does this PR also fix #1241?

Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, that will need a separate PR

if self._subscription.subscribed_pattern:
Expand All @@ -264,11 +264,7 @@ def poll(self):
self._maybe_auto_commit_offsets_async()

def time_to_next_poll(self):
"""
Return the time to the next needed invocation of {@link #poll(long)}.
@param now current time in milliseconds
@return the maximum time in milliseconds the caller should wait before the next invocation of poll()
"""
"""Return seconds (float) remaining until :meth:`.poll` should be called again"""
if not self.config['enable_auto_commit']:
return self.time_to_next_heartbeat()

Expand Down Expand Up @@ -396,12 +392,9 @@ def close(self, autocommit=True):
super(ConsumerCoordinator, self).close()

def _invoke_completed_offset_commit_callbacks(self):
try:
while True:
callback, offsets, exception = self.completed_offset_commits.popleft()
callback(offsets, exception)
except IndexError:
pass
while self.completed_offset_commits:
callback, offsets, exception = self.completed_offset_commits.popleft()
callback(offsets, exception)

def commit_offsets_async(self, offsets, callback=None):
"""Commit specific offsets asynchronously.
Expand Down
4 changes: 1 addition & 3 deletions kafka/coordinator/heartbeat.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ def received_heartbeat(self):
self.last_receive = time.time()

def time_to_next_heartbeat(self):
"""Returns seconds (float) remaining before next heartbeat should be sent"""
time_since_last_heartbeat = time.time() - max(self.last_send, self.last_reset)
if self.heartbeat_failed:
delay_to_next_heartbeat = self.config['retry_backoff_ms'] / 1000
Expand All @@ -58,9 +59,6 @@ def session_timeout_expired(self):
last_recv = max(self.last_receive, self.last_reset)
return (time.time() - last_recv) > (self.config['session_timeout_ms'] / 1000)

def interval(self):
return self.config['heartbeat_interval_ms'] / 1000

def reset_timeouts(self):
self.last_reset = time.time()
self.last_poll = time.time()
Expand Down
7 changes: 4 additions & 3 deletions kafka/errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,11 +64,12 @@ def __init__(self, *args, **kwargs):
"""Commit cannot be completed since the group has already
rebalanced and assigned the partitions to another member.
This means that the time between subsequent calls to poll()
was longer than the configured max.poll.interval.ms, which
was longer than the configured max_poll_interval_ms, which
typically implies that the poll loop is spending too much
time processing messages. You can address this either by
increasing the session timeout or by reducing the maximum
size of batches returned in poll() with max.poll.records.
increasing the rebalance timeout with max_poll_interval_ms,
or by reducing the maximum size of batches returned in poll()
with max_poll_records.
""", *args, **kwargs)


Expand Down