Observed issue
==============
The clear tests occasionally fail with the following babeltrace error
when a live session is stopped following a "clear". Unfortunately, this
problem only seems to occur on certain machines. In my case, I only
managed to reproduce this on the CI's workers.
10-07 12:39:48.333 7679 7679 E PLUGIN/SRC.CTF.LTTNG-LIVE/VIEWER lttng_live_get_stream_bytes@viewer-connection.c:1610 [lttng-live] Received get_data_packet response: error
10-07 12:39:48.333 7679 7679 E PLUGIN/CTF/MSG-ITER request_medium_bytes@msg-iter.c:563 [lttng-live] User function failed: status=ERROR
10-07 12:39:48.333 7679 7679 E PLUGIN/CTF/MSG-ITER ctf_msg_iter_get_next_message@msg-iter.c:2899 [lttng-live] Cannot handle state: msg-it-addr=0x5603c28e2830, state=DSCOPE_TRACE_PACKET_HEADER_BEGIN
10-07 12:39:48.333 7679 7679 E PLUGIN/SRC.CTF.LTTNG-LIVE lttng_live_iterator_next_handle_one_active_data_stream@lttng-live.c:845 [lttng-live] CTF message iterator failed to get next message: msg-iter=0x5603c28e2830, msg-iter-status=ERROR
10-07 12:39:48.333 7679 7679 E PLUGIN/SRC.CTF.LTTNG-LIVE lttng_live_msg_iter_next@lttng-live.c:1665 [lttng-live] Error preparing the next batch of messages: live-iter-status=LTTNG_LIVE_ITERATOR_STATUS_ERROR
10-07 12:39:48.333 7679 7679 W LIB/MSG-ITER bt_message_iterator_next@iterator.c:864 Component input port message iterator's "next" method failed: iter-addr=0x5603c28cb0f0, iter-upstream-comp-name="lttng-live", iter-upstream-comp-log-level=WARNING, iter-upstream-comp-class-type=SOURCE, iter-upstream-comp-class-name="lttng-live", iter-upstream-comp-class-partial-descr="Connect to an LTTng relay daemon", iter-upstream-port-type=OUTPUT, iter-upstream-port-name="out", status=ERROR
10-07 12:39:48.333 7679 7679 E PLUGIN/FLT.UTILS.MUXER muxer_upstream_msg_iter_next@muxer.c:454 [muxer] Upstream iterator's next method returned an error: status=ERROR
10-07 12:39:48.333 7679 7679 E PLUGIN/FLT.UTILS.MUXER validate_muxer_upstream_msg_iters@muxer.c:991 [muxer] Cannot validate muxer's upstream message iterator wrapper: muxer-msg-iter-addr=0x5603c28dbe70, muxer-upstream-msg-iter-wrap-addr=0x5603c28cd0f0
10-07 12:39:48.333 7679 7679 E PLUGIN/FLT.UTILS.MUXER muxer_msg_iter_next@muxer.c:1415 [muxer] Cannot get next message: comp-addr=0x5603c28dc960, muxer-comp-addr=0x5603c28db0a0, muxer-msg-iter-addr=0x5603c28dbe70, msg-iter-addr=0x5603c28caf80, status=ERROR
10-07 12:39:48.333 7679 7679 W LIB/MSG-ITER bt_message_iterator_next@iterator.c:864 Component input port message iterator's "next" method failed: iter-addr=0x5603c28caf80, iter-upstream-comp-name="muxer", iter-upstream-comp-log-level=WARNING, iter-upstream-comp-class-type=FILTER, iter-upstream-comp-class-name="muxer", iter-upstream-comp-class-partial-descr="Sort messages from multiple inpu", iter-upstream-port-type=OUTPUT, iter-upstream-port-name="out", status=ERROR
10-07 12:39:48.333 7679 7679 W LIB/GRAPH consume_graph_sink@graph.c:473 Component's "consume" method failed: status=ERROR, comp-addr=0x5603c28dcb60, comp-name="pretty", comp-log-level=WARNING, comp-class-type=SINK, comp-class-name="pretty", comp-class-partial-descr="Pretty-print messages (`text` fo", comp-class-is-frozen=0, comp-class-so-handle-addr=0x5603c28c8140, comp-class-so-handle-path="/home/jenkins/jgalar-debug/build/usr/lib/babeltrace2/plugins/babeltrace-plugin-text.so", comp-input-port-count=1, comp-output-port-count=0
10-07 12:39:48.333 7679 7679 E CLI cmd_run@babeltrace2.c:2548 Graph failed to complete successfully
10-07 12:39:48.333 7679 7679 E PLUGIN/SRC.CTF.LTTNG-LIVE/VIEWER lttng_live_session_detach@viewer-connection.c:1227 [lttng-live] Unknown detach return code 0
ERROR: [Babeltrace CLI] (babeltrace2.c:2548)
Graph failed to complete successfully
CAUSED BY [libbabeltrace2] (graph.c:473)
Component's "consume" method failed: status=ERROR, comp-addr=0x5603c28dcb60,
comp-name="pretty", comp-log-level=WARNING, comp-class-type=SINK,
comp-class-name="pretty", comp-class-partial-descr="Pretty-print messages
(`text` fo", comp-class-is-frozen=0, comp-class-so-handle-addr=0x5603c28c8140,
comp-class-so-handle-path="/home/jenkins/jgalar-debug/build/usr/lib/babeltrace2/plugins/babeltrace-plugin-text.so",
comp-input-port-count=1, comp-output-port-count=0
CAUSED BY [libbabeltrace2] (iterator.c:864)
Component input port message iterator's "next" method failed:
iter-addr=0x5603c28caf80, iter-upstream-comp-name="muxer",
iter-upstream-comp-log-level=WARNING, iter-upstream-comp-class-type=FILTER,
iter-upstream-comp-class-name="muxer",
iter-upstream-comp-class-partial-descr="Sort messages from multiple inpu",
iter-upstream-port-type=OUTPUT, iter-upstream-port-name="out", status=ERROR
CAUSED BY [muxer: 'filter.utils.muxer'] (muxer.c:991)
Cannot validate muxer's upstream message iterator wrapper:
muxer-msg-iter-addr=0x5603c28dbe70,
muxer-upstream-msg-iter-wrap-addr=0x5603c28cd0f0
CAUSED BY [muxer: 'filter.utils.muxer'] (muxer.c:454)
Upstream iterator's next method returned an error: status=ERROR
CAUSED BY [libbabeltrace2] (iterator.c:864)
Component input port message iterator's "next" method failed:
iter-addr=0x5603c28cb0f0, iter-upstream-comp-name="lttng-live",
iter-upstream-comp-log-level=WARNING, iter-upstream-comp-class-type=SOURCE,
iter-upstream-comp-class-name="lttng-live",
iter-upstream-comp-class-partial-descr="Connect to an LTTng relay daemon",
iter-upstream-port-type=OUTPUT, iter-upstream-port-name="out", status=ERROR
CAUSED BY [lttng-live: 'source.ctf.lttng-live'] (lttng-live.c:1665)
Error preparing the next batch of messages:
live-iter-status=LTTNG_LIVE_ITERATOR_STATUS_ERROR
CAUSED BY [lttng-live: 'source.ctf.lttng-live'] (lttng-live.c:845)
CTF message iterator failed to get next message: msg-iter=0x5603c28e2830,
msg-iter-status=ERROR
CAUSED BY [lttng-live: 'source.ctf.lttng-live'] (msg-iter.c:2899)
Cannot handle state: msg-it-addr=0x5603c28e2830,
state=DSCOPE_TRACE_PACKET_HEADER_BEGIN
CAUSED BY [lttng-live: 'source.ctf.lttng-live'] (msg-iter.c:563)
User function failed: status=ERROR
CAUSED BY [lttng-live: 'source.ctf.lttng-live'] (viewer-connection.c:1610)
Received get_data_packet response: error
This occurs immediately following a 'stop' on the session. As the error
indicates, a request to obtain a data packet fails with a generic
error reply.
Moreover, the following LTTNG_VIEWER_DETACH_SESSION appears to fail
with an invalid status code. This is addressed in a different commit.
Reproducing the test's failure without redirecting the relay daemon's
output allows us to see the following errors after the first stop:
PERROR - 14:33:44.
929675253 [25108/25115]: Failed to open fs handle to ust/uid/1001/64-bit/index/chan_0.idx, open() returned: No such file or directory (in fd_tracker_open_fs_handle() at fd-tracker.c:550)
PERROR - 14:33:45.
030037417 [25108/25115]: Failed to open fs handle to ust/uid/1001/64-bit/index/chan_0.idx, open() returned: No such file or directory (in fd_tracker_open_fs_handle() at fd-tracker.c:550)
PERROR - 14:33:45.
130429370 [25108/25115]: Failed to open fs handle to ust/uid/1001/64-bit/index/chan_0.idx, open() returned: No such file or directory (in fd_tracker_open_fs_handle() at fd-tracker.c:550)
PERROR - 14:33:45.
230829447 [25108/25115]: Failed to open fs handle to ust/uid/1001/64-bit/index/chan_0.idx, open() returned: No such file or directory (in fd_tracker_open_fs_handle() at fd-tracker.c:550)
PERROR - 14:33:45.
331223320 [25108/25115]: Failed to open fs handle to ust/uid/1001/64-bit/index/chan_0.idx, open() returned: No such file or directory (in fd_tracker_open_fs_handle() at fd-tracker.c:550)
This is produced with the following back-trace:
(gdb) bt
#0 __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:51
#1 0x00007ffff69648b1 in __GI_abort () at abort.c:79
#2 0x00005555555b4f1f in fd_tracker_open_fs_handle (tracker=0x55555582c620, directory=0x7fffe8006680,
path=0x7ffff0a25870 "ust/uid/1001/64-bit/index/chan_1.idx", flags=0, mode=0x7ffff0a24508) at fd-tracker.c:550
#3 0x0000555555595c34 in _lttng_trace_chunk_open_fs_handle_locked (chunk=0x7fffe0002130, file_path=0x7ffff0a25870 "ust/uid/1001/64-bit/index/chan_1.idx",
flags=0, mode=432, out_handle=0x7ffff0a24710, expect_no_file=true) at trace-chunk.c:1388
#4 0x0000555555595eef in lttng_trace_chunk_open_fs_handle (chunk=0x7fffe0002130, file_path=0x7ffff0a25870 "ust/uid/1001/64-bit/index/chan_1.idx", flags=0,
mode=432, out_handle=0x7ffff0a24710, expect_no_file=true) at trace-chunk.c:1433
#5 0x00005555555da6c2 in _lttng_index_file_create_from_trace_chunk (chunk=0x7fffe0002130, channel_path=0x7fffe8018c30 "ust/uid/1001/64-bit",
stream_name=0x7fffe8018c10 "chan_1", stream_file_size=0, stream_file_index=0, index_major=1, index_minor=1, unlink_existing_file=false, flags=0,
expect_no_file=true, file=0x7fffe0002270) at index.c:97
#6 0x00005555555dad8a in lttng_index_file_create_from_trace_chunk_read_only (chunk=0x7fffe0002130, channel_path=0x7fffe8018c30 "ust/uid/1001/64-bit",
stream_name=0x7fffe8018c10 "chan_1", stream_file_size=0, stream_file_index=0, index_major=1, index_minor=1, expect_no_file=true, file=0x7fffe0002270)
at index.c:186
#7 0x000055555557640f in try_open_index (vstream=0x7fffe0002250, rstream=0x7fffe8018c50) at live.c:1378
#8 0x0000555555577155 in viewer_get_next_index (conn=0x7fffd4001440) at live.c:1643
#9 0x0000555555579a01 in process_control (recv_hdr=0x7ffff0a27c30, conn=0x7fffd4001440) at live.c:2311
#10 0x000055555557a1db in thread_worker (data=0x0) at live.c:2482
#11 0x00007ffff6d1c6db in start_thread (arg=0x7ffff0a28700) at pthread_create.c:463
#12 0x00007ffff6a45a3f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
That problem is mostly cosmetic in nature (the open can fail
"legitimately") as the PERROR should simply not be printed and is
addressed in a different commit.
This error is also produced after a 'clear' is issued:
PERROR - 14:33:45.
532782268 [25108/25115]: Failed to read from file system handle of viewer stream id 1, offset: 4096: No such file or directory (in viewer_get_packet() at live.c:1849)
Which is produced with the following back-trace:
#0 __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:51
#1 0x00007f53e297c8b1 in __GI_abort () at abort.c:79
#2 0x000055dd77ccef2c in viewer_get_packet (conn=0x7f53c4001100) at live.c:1850
#3 0x000055dd77cd0a15 in process_control (recv_hdr=0x7f53dca3fc30, conn=0x7f53c4001100) at live.c:2315
#4 0x000055dd77cd11db in thread_worker (data=0x0) at live.c:2483
#5 0x00007f53e2d346db in start_thread (arg=0x7f53dca40700) at pthread_create.c:463
#6 0x00007f53e2a5da3f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
A similar problem occurs, although more rarely, when reading an
index entry in viewer_get_next_index().
Cause
=====
The following situation leads to both failures to get a
packet and failures to get the next index:
- Viewer connects to an existing session,
- Viewer consumes a number of packets, alternating the
GET_NEXT_INDEX and GET_PACKET commands,
- The session's streams are rotated to a new trace chunk
(as part of a clear),
- The session is started and stopped, causing new packets
to be produced and received,
- The session is stopped and destroyed, causing the session's
streams to rotate into a "null" trace chunk (no active
trace files),
- Viewer issues GET_NEXT_INDEX or GET_PACKET, but the fact
that a rotation occurred on the receiving end is not detected
as the relay streams' trace chunks are "null".
The crux of the problem is that lttng_trace_chunk_ids_equal() is
bypassed when the current trace chunk of a relay stream is "null".
The rationale for skipping this check is that it is assumed that the
files currently opened by the live server can still be used even
if the consumer has rotated the corresponding streams into a 'null'
trace chunk, meaning no trace chunk is 'set' for those streams.
This makes sense in one scenario: the session was destroyed and we wish
to allow a connected live client to finish consuming the trace packets
up to the end of the session's lifetime.
Here, the situation is different. The viewer is reading chunk 'A'.
Meanwhile, a rotation occurs into chunk 'B' and packets are received for
chunk 'B'. Then, a rotation to a 'null' chunk (no active chunk) occurs.
In essence, the live server never sees the rotation between chunk 'A'
and 'B', and simply assumes that a rotation from 'A' to 'null' occurred,
as would happen at the end of a session.
In terms of the code, in viewer_get_next_index(), a call to
check_index_status() is performed to determine if an index is available.
The function checks that `index_received_seqcount` is greater than
`index_sent_seqcount`. In that case, it determines that an index must be
available.
Unfortunately, there is no way for the live server to determine that the
remaining indexes are in a chunk that doesn't exist anymore (chunk 'B').
Thus, viewer_get_next_index() attempts to read an index entry from the
current index file and fails.
Solution
========
1) lttng_trace_chunk_ids_equal() is modified to properly handle
'null' trace chunks:
- A null and a non-null trace chunk are not equal,
- Two null trace chunks are equal.
2) Rotation count
A rotation counter is introduced to track the number of rotations
that occurred during a relay stream's lifetime. This counter is
sampled by the matching viewer streams on creation and on rotation
and is used to determine if all rotations were "seen" by the viewer
stream.
Hence, this allows us to handle the special case where a viewer
is consuming the contents of a relay stream that just transitioned
into a 'null' trace chunk (see comments in patch).
The rest of the modifications simply allow the live server to handle
null trace chunks in viewer streams. This fixes another unrelated bug
that I observed while investigating this: sessions that don't have an
active trace chunk are not shown when listing sessions with babeltrace.
To reproduce, simply stop and clear a session, then attempt to list the
sessions of the associated relay daemon.
Known drawbacks
===============
None.
Signed-off-by: Jérémie Galarneau <jeremie.galarneau@efficios.com>
Change-Id: Ibb3116990e34b7ec3b477f3482d0c0ff1e848d09
assert(relay_session);
ASSERT_LOCKED(relay_session->lock);
- if (!viewer_session->current_trace_chunk) {
- ERR("Internal error: viewer session associated with session \"%s\" has a NULL trace chunk",
- relay_session->session_name);
- ret = -1;
- goto error;
- }
-
if (relay_session->connection_closed) {
*closed = true;
}
viewer_stream = viewer_stream_get_by_id(
relay_stream->stream_handle);
if (!viewer_stream) {
- struct lttng_trace_chunk *viewer_stream_trace_chunk;
+ struct lttng_trace_chunk *viewer_stream_trace_chunk = NULL;
/*
* Save that we sent the metadata stream to the
goto error_unlock;
}
} else {
- const bool reference_acquired = lttng_trace_chunk_get(
- viewer_session->current_trace_chunk);
+ bool reference_acquired;
+
+ /*
+ * Transition the viewer session into the newest trace chunk available.
+ */
+ if (!lttng_trace_chunk_ids_equal(viewer_session->current_trace_chunk,
+ relay_stream->trace_chunk)) {
+
+ ret = viewer_session_set_trace_chunk_copy(
+ viewer_session,
+ relay_stream->trace_chunk);
+ if (ret) {
+ ret = -1;
+ ctf_trace_put(ctf_trace);
+ goto error_unlock;
+ }
+ }
+ reference_acquired = lttng_trace_chunk_get(
+ viewer_session->current_trace_chunk);
assert(reference_acquired);
viewer_stream_trace_chunk =
viewer_session->current_trace_chunk;
error_unlock:
rcu_read_unlock();
-error:
+
if (relay_stream) {
pthread_mutex_unlock(&relay_stream->lock);
stream_put(relay_stream);
/* Skip closed session */
goto next_session;
}
- if (!session->current_trace_chunk) {
- /*
- * Skip un-attachable session. It is either
- * being destroyed or has not had a trace
- * chunk created against it yet.
- */
- goto next_session;
- }
if (count >= buf_count) {
struct lttng_viewer_session *newbuf;
DBG("Attach session ID %" PRIu64 " received", session_id);
pthread_mutex_lock(&session->lock);
- if (!session->current_trace_chunk) {
- /*
- * Session is either being destroyed or it never had a trace
- * chunk created against it.
- */
- DBG("Session requested by live client has no current trace chunk, returning unknown session");
- response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
- goto send_reply;
- }
if (session->live_timer == 0) {
DBG("Not live session");
response.status = htobe32(LTTNG_VIEWER_ATTACH_NOT_LIVE);
/*
* First time, we open the index file and at least one index is ready.
*/
- if (rstream->index_received_seqcount == 0) {
+ if (rstream->index_received_seqcount == 0 ||
+ !vstream->stream_file.trace_chunk) {
ret = -ENOENT;
goto end;
}
+
chunk_status = lttng_index_file_create_from_trace_chunk_read_only(
vstream->stream_file.trace_chunk, rstream->path_name,
rstream->channel_name, rstream->tracefile_size,
return 1;
}
+static
+void viewer_stream_rotate_to_trace_chunk(struct relay_viewer_stream *vstream,
+ struct lttng_trace_chunk *new_trace_chunk)
+{
+ lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
+
+ if (new_trace_chunk) {
+ const bool acquired_reference = lttng_trace_chunk_get(
+ new_trace_chunk);
+
+ assert(acquired_reference);
+ }
+
+ vstream->stream_file.trace_chunk = new_trace_chunk;
+ viewer_stream_sync_tracefile_array_tail(vstream);
+ viewer_stream_close_files(vstream);
+}
+
/*
* Send the next index for a stream.
*
goto send_reply;
}
- if (rstream->trace_chunk && !lttng_trace_chunk_ids_equal(
+ /*
+ * Transition the viewer session into the newest trace chunk available.
+ */
+ if (!lttng_trace_chunk_ids_equal(
conn->viewer_session->current_trace_chunk,
rstream->trace_chunk)) {
DBG("Relay stream and viewer chunk ids differ");
goto send_reply;
}
}
- if (conn->viewer_session->current_trace_chunk !=
- vstream->stream_file.trace_chunk) {
- bool acquired_reference;
+ /*
+ * Transition the viewer stream into the latest trace chunk available.
+ *
+ * Note that the stream must _not_ rotate in one precise condition:
+ * the relay stream has rotated to a NULL trace chunk and the viewer
+ * stream is consuming the trace chunk that was active just before
+ * that rotation to NULL.
+ *
+ * This allows clients to consume all the packets of a trace chunk
+ * after a session's destruction.
+ */
+ if (conn->viewer_session->current_trace_chunk != vstream->stream_file.trace_chunk &&
+ !(rstream->completed_rotation_count == vstream->last_seen_rotation_count + 1 && !rstream->trace_chunk)) {
DBG("Viewer session and viewer stream chunk differ: "
"vsession chunk %p vstream chunk %p",
conn->viewer_session->current_trace_chunk,
vstream->stream_file.trace_chunk);
- lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
- acquired_reference = lttng_trace_chunk_get(conn->viewer_session->current_trace_chunk);
- assert(acquired_reference);
- vstream->stream_file.trace_chunk =
- conn->viewer_session->current_trace_chunk;
- viewer_stream_sync_tracefile_array_tail(vstream);
- viewer_stream_close_files(vstream);
+ viewer_stream_rotate_to_trace_chunk(vstream,
+ conn->viewer_session->current_trace_chunk);
+ vstream->last_seen_rotation_count =
+ rstream->completed_rotation_count;
}
ret = check_index_status(vstream, rstream, ctf_trace, &viewer_index);
lttng_trace_chunk_put(stream->trace_chunk);
stream->trace_chunk = stream->ongoing_rotation.value.next_trace_chunk;
stream->ongoing_rotation = (typeof(stream->ongoing_rotation)) {};
+ stream->completed_rotation_count++;
}
static int stream_create_data_output_file_from_trace_chunk(
*/
struct lttng_trace_chunk *trace_chunk;
LTTNG_OPTIONAL(struct relay_stream_rotation) ongoing_rotation;
+ uint64_t completed_rotation_count;
};
struct relay_stream *stream_create(struct ctf_trace *trace,
int ret = 0;
struct lttng_trace_chunk *viewer_chunk;
- assert(relay_session_trace_chunk);
lttng_trace_chunk_put(vsession->current_trace_chunk);
vsession->current_trace_chunk = NULL;
DBG("Copying relay session's current trace chunk to the viewer session");
+ if (!relay_session_trace_chunk) {
+ goto end;
+ }
+
viewer_chunk = lttng_trace_chunk_copy(relay_session_trace_chunk);
if (!viewer_chunk) {
ERR("Failed to create a viewer trace chunk from the relay session's current chunk");
} else {
int ret;
- assert(session->current_trace_chunk);
assert(!vsession->current_trace_chunk);
session->viewer_attached = true;
static void viewer_stream_destroy(struct relay_viewer_stream *vstream)
{
+ lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
free(vstream->path_name);
free(vstream->channel_name);
free(vstream);
enum lttng_viewer_seek seek_t)
{
struct relay_viewer_stream *vstream = NULL;
- const bool acquired_reference = lttng_trace_chunk_get(trace_chunk);
ASSERT_LOCKED(stream->lock);
- if (!acquired_reference) {
- goto error;
- }
vstream = zmalloc(sizeof(*vstream));
if (!vstream) {
goto error;
}
+ if (trace_chunk) {
+ const bool acquired_reference = lttng_trace_chunk_get(
+ trace_chunk);
+
+ assert(acquired_reference);
+ }
+
vstream->stream_file.trace_chunk = trace_chunk;
- trace_chunk = NULL;
vstream->path_name = lttng_strndup(stream->path_name, LTTNG_VIEWER_PATH_MAX);
if (vstream->path_name == NULL) {
PERROR("relay viewer path_name alloc");
vstream);
}
+ vstream->last_seen_rotation_count = stream->completed_rotation_count;
+
/* Globally visible after the add unique. */
lttng_ht_node_init_u64(&vstream->stream_n, stream->stream_handle);
urcu_ref_init(&vstream->ref);
if (vstream) {
viewer_stream_destroy(vstream);
}
- if (trace_chunk && acquired_reference) {
- lttng_trace_chunk_put(trace_chunk);
- }
return NULL;
}
} stream_file;
/* index file from which to read the index data. */
struct lttng_index_file *index_file;
+ /*
+ * Last seen rotation count in stream.
+ *
+ * Sampled on every change to the viewer stream trace chunk,
+ * this allows the live server to determine if it saw the latest
+ * rotation that occurred on the receiving end.
+ */
+ uint64_t last_seen_rotation_count;
char *path_name;
char *channel_name;
{
bool equal = false;
- if (!chunk_a || !chunk_b) {
+ if (chunk_a == chunk_b) {
+ equal = true;
+ goto end;
+ }
+
+ if (!!chunk_a ^ !!chunk_b) {
goto end;
}