aio: drop io_flush argument
The .io_flush() handler no longer exists and has no users.  Drop the
io_flush argument to aio_set_fd_handler() and related functions.

The AioFlushEventNotifierHandler and AioFlushHandler typedefs are no
longer used and are dropped too.

Reviewed-by: Paolo Bonzini <[email protected]>
Signed-off-by: Stefan Hajnoczi <[email protected]>
stefanhaRH committed Aug 19, 2013 · 1 parent 1b9ecdb · commit f2e5dca
Showing 16 changed files with 61 additions and 88 deletions.
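
In practical terms the change removes one parameter from each registration function. A minimal before/after sketch of the main signature from include/block/aio.h (the call-site updates in the diffs below all follow the same pattern):

    /* Before: every caller had to supply an io_flush callback. */
    void aio_set_fd_handler(AioContext *ctx, int fd,
                            IOHandler *io_read, IOHandler *io_write,
                            AioFlushHandler *io_flush, void *opaque);

    /* After: the io_flush parameter is gone; only the read/write
     * handlers and the opaque pointer remain. */
    void aio_set_fd_handler(AioContext *ctx, int fd,
                            IOHandler *io_read, IOHandler *io_write,
                            void *opaque);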
aio-posix.c: 2 additions & 5 deletions

@@ -46,7 +46,6 @@ void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         IOHandler *io_read,
                         IOHandler *io_write,
-                        AioFlushHandler *io_flush,
                         void *opaque)
 {
     AioHandler *node;
@@ -95,12 +94,10 @@ void aio_set_fd_handler(AioContext *ctx,
 
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
-                            EventNotifierHandler *io_read,
-                            AioFlushEventNotifierHandler *io_flush)
+                            EventNotifierHandler *io_read)
 {
     aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
-                       (IOHandler *)io_read, NULL,
-                       (AioFlushHandler *)io_flush, notifier);
+                       (IOHandler *)io_read, NULL, notifier);
 }
 
 bool aio_pending(AioContext *ctx)
aio-win32.c: 1 addition & 2 deletions

@@ -30,8 +30,7 @@ struct AioHandler {
 
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *e,
-                            EventNotifierHandler *io_notify,
-                            AioFlushEventNotifierHandler *io_flush)
+                            EventNotifierHandler *io_notify)
 {
     AioHandler *node;
 
async.c: 2 additions & 2 deletions

@@ -201,7 +201,7 @@ aio_ctx_finalize(GSource *source)
     AioContext *ctx = (AioContext *) source;
 
     thread_pool_free(ctx->thread_pool);
-    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
+    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
     event_notifier_cleanup(&ctx->notifier);
     qemu_mutex_destroy(&ctx->bh_lock);
     g_array_free(ctx->pollfds, TRUE);
@@ -243,7 +243,7 @@ AioContext *aio_context_new(void)
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear, NULL);
+                           event_notifier_test_and_clear);
 
     return ctx;
 }
block/curl.c: 4 additions & 5 deletions

@@ -93,17 +93,16 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, NULL, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s);
             break;
         case CURL_POLL_OUT:
-            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, NULL, s);
+            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s);
             break;
         case CURL_POLL_INOUT:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
-                                    NULL, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s);
            break;
        case CURL_POLL_REMOVE:
-            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
+            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL);
            break;
    }
 
block/gluster.c: 3 additions & 4 deletions

@@ -339,7 +339,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
     }
     fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
     qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
-                            qemu_gluster_aio_event_reader, NULL, NULL, s);
+                            qemu_gluster_aio_event_reader, NULL, s);
 
 out:
     qemu_opts_del(opts);
@@ -438,8 +438,7 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         qemu_aio_release(acb);
         close(s->fds[GLUSTER_FD_READ]);
         close(s->fds[GLUSTER_FD_WRITE]);
-        qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
-                                NULL);
+        qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
         bs->drv = NULL; /* Make the disk inaccessible */
         qemu_mutex_unlock_iothread();
     }
@@ -595,7 +594,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
 
     close(s->fds[GLUSTER_FD_READ]);
     close(s->fds[GLUSTER_FD_WRITE]);
-    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
 
     if (s->fd) {
         glfs_close(s->fd);
block/iscsi.c: 1 addition & 2 deletions

@@ -159,7 +159,6 @@ iscsi_set_events(IscsiLun *iscsilun)
         qemu_aio_set_fd_handler(iscsi_get_fd(iscsi),
                                 iscsi_process_read,
                                 (ev & POLLOUT) ? iscsi_process_write : NULL,
-                                NULL,
                                 iscsilun);
 
     }
@@ -1208,7 +1207,7 @@ static void iscsi_close(BlockDriverState *bs)
         qemu_del_timer(iscsilun->nop_timer);
         qemu_free_timer(iscsilun->nop_timer);
     }
-    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL);
     iscsi_destroy_context(iscsi);
     memset(iscsilun, 0, sizeof(IscsiLun));
 }
block/linux-aio.c: 1 addition & 2 deletions

@@ -190,8 +190,7 @@ void *laio_init(void)
         goto out_close_efd;
     }
 
-    qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb,
-                                NULL);
+    qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb);
 
     return s;
 
block/nbd.c: 4 additions & 7 deletions

@@ -334,8 +334,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
 
     qemu_co_mutex_lock(&s->send_mutex);
     s->send_coroutine = qemu_coroutine_self();
-    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
-                            NULL, s);
+    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s);
     if (qiov) {
         if (!s->is_unix) {
             socket_set_cork(s->sock, 1);
@@ -354,8 +353,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
     } else {
         rc = nbd_send_request(s->sock, request);
     }
-    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
-                            NULL, s);
+    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, s);
     s->send_coroutine = NULL;
     qemu_co_mutex_unlock(&s->send_mutex);
     return rc;
@@ -431,8 +429,7 @@ static int nbd_establish_connection(BlockDriverState *bs)
     /* Now that we're connected, set the socket to be non-blocking and
      * kick the reply mechanism. */
     qemu_set_nonblock(sock);
-    qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL,
-                            NULL, s);
+    qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, s);
 
     s->sock = sock;
     s->size = size;
@@ -452,7 +449,7 @@ static void nbd_teardown_connection(BlockDriverState *bs)
     request.len = 0;
     nbd_send_request(s->sock, &request);
 
-    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL);
     closesocket(s->sock);
 }
 
block/rbd.c: 2 additions & 2 deletions

@@ -545,7 +545,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags)
     fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
     fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
     qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
-                            NULL, NULL, s);
+                            NULL, s);
 
 
     qemu_opts_del(opts);
@@ -569,7 +569,7 @@ static void qemu_rbd_close(BlockDriverState *bs)
 
     close(s->fds[0]);
     close(s->fds[1]);
-    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL);
 
     rbd_close(s->image);
     rados_ioctx_destroy(s->io_ctx);
block/sheepdog.c: 8 additions & 10 deletions

@@ -531,14 +531,14 @@ static coroutine_fn void do_co_req(void *opaque)
     unsigned int *rlen = srco->rlen;
 
     co = qemu_coroutine_self();
-    qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, NULL, co);
+    qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co);
 
     ret = send_co_req(sockfd, hdr, data, wlen);
     if (ret < 0) {
         goto out;
     }
 
-    qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, NULL, co);
+    qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co);
 
     ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
     if (ret < sizeof(*hdr)) {
@@ -563,7 +563,7 @@ static coroutine_fn void do_co_req(void *opaque)
 out:
     /* there is at most one request for this sockfd, so it is safe to
      * set each handler to NULL. */
-    qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL);
 
     srco->ret = ret;
     srco->finished = true;
@@ -804,7 +804,7 @@ static int get_sheep_fd(BDRVSheepdogState *s)
         return fd;
     }
 
-    qemu_aio_set_fd_handler(fd, co_read_response, NULL, NULL, s);
+    qemu_aio_set_fd_handler(fd, co_read_response, NULL, s);
     return fd;
 }
 
@@ -1054,8 +1054,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
 
     qemu_co_mutex_lock(&s->lock);
     s->co_send = qemu_coroutine_self();
-    qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request,
-                            NULL, s);
+    qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s);
     socket_set_cork(s->fd, 1);
 
     /* send a header */
@@ -1076,8 +1075,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
     }
 
     socket_set_cork(s->fd, 0);
-    qemu_aio_set_fd_handler(s->fd, co_read_response, NULL,
-                            NULL, s);
+    qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s);
     qemu_co_mutex_unlock(&s->lock);
 
     return 0;
@@ -1335,7 +1333,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags)
     g_free(buf);
     return 0;
 out:
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
     if (s->fd >= 0) {
         closesocket(s->fd);
     }
@@ -1563,7 +1561,7 @@ static void sd_close(BlockDriverState *bs)
         error_report("%s, %s", sd_strerror(rsp->result), s->name);
     }
 
-    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL);
     closesocket(s->fd);
     g_free(s->host_spec);
 }
block/ssh.c: 2 additions & 2 deletions

@@ -758,13 +758,13 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s)
     DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock,
             rd_handler, wr_handler);
 
-    qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, NULL, co);
+    qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, co);
 }
 
 static coroutine_fn void clear_fd_handler(BDRVSSHState *s)
 {
     DPRINTF("s->sock=%d", s->sock);
-    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
+    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL);
 }
 
 /* A non-blocking call returned EAGAIN, so yield, ensuring the
hw/block/dataplane/virtio-blk.c: 4 additions & 4 deletions

@@ -472,15 +472,15 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
         exit(1);
     }
     s->host_notifier = *virtio_queue_get_host_notifier(vq);
-    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify, NULL);
+    aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
 
     /* Set up ioqueue */
     ioq_init(&s->ioqueue, s->fd, REQ_MAX);
     for (i = 0; i < ARRAY_SIZE(s->requests); i++) {
         ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
     }
     s->io_notifier = *ioq_get_notifier(&s->ioqueue);
-    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io, NULL);
+    aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
 
     s->started = true;
     trace_virtio_blk_data_plane_start(s);
@@ -512,10 +512,10 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
         qemu_thread_join(&s->thread);
     }
 
-    aio_set_event_notifier(s->ctx, &s->io_notifier, NULL, NULL);
+    aio_set_event_notifier(s->ctx, &s->io_notifier, NULL);
     ioq_cleanup(&s->ioqueue);
 
-    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL, NULL);
+    aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
     k->set_host_notifier(qbus->parent, 0, false);
 
     aio_context_unref(s->ctx);
include/block/aio.h: 2 additions & 12 deletions

@@ -74,9 +74,6 @@ typedef struct AioContext {
     struct ThreadPool *thread_pool;
 } AioContext;
 
-/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
-typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
-
 /**
  * aio_context_new: Allocate a new AioContext.
  *
@@ -198,9 +195,6 @@ bool aio_pending(AioContext *ctx);
 bool aio_poll(AioContext *ctx, bool blocking);
 
 #ifdef CONFIG_POSIX
-/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
-typedef int (AioFlushHandler)(void *opaque);
-
 /* Register a file descriptor and associated callbacks.  Behaves very similarly
  * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
  * be invoked when using qemu_aio_wait().
@@ -212,7 +206,6 @@ void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         IOHandler *io_read,
                         IOHandler *io_write,
-                        AioFlushHandler *io_flush,
                         void *opaque);
 #endif
 
@@ -225,8 +218,7 @@ void aio_set_fd_handler(AioContext *ctx,
  */
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
-                            EventNotifierHandler *io_read,
-                            AioFlushEventNotifierHandler *io_flush);
+                            EventNotifierHandler *io_read);
 
 /* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
@@ -240,14 +232,12 @@ struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
 
 bool qemu_aio_wait(void);
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
-                                 EventNotifierHandler *io_read,
-                                 AioFlushEventNotifierHandler *io_flush);
+                                 EventNotifierHandler *io_read);
 
 #ifdef CONFIG_POSIX
 void qemu_aio_set_fd_handler(int fd,
                              IOHandler *io_read,
                              IOHandler *io_write,
-                             AioFlushHandler *io_flush,
                              void *opaque);
 #endif
 
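For illustration, a caller of the updated POSIX wrappers declared above might look like the sketch below. This is not code from the commit; my_fd, my_read_cb, and the opaque payload are invented names for this example:

    /* IOHandler callbacks receive only the opaque pointer. */
    static void my_read_cb(void *opaque)
    {
        int *fd = opaque;  /* hypothetical per-connection state */
        /* ... read from *fd and resume whoever is waiting ... */
    }

    /* Register a read handler; note there is no io_flush argument. */
    qemu_aio_set_fd_handler(my_fd, my_read_cb, NULL, &my_fd);

    /* Unregister by passing NULL handlers, as the block drivers above do. */
    qemu_aio_set_fd_handler(my_fd, NULL, NULL, NULL);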
main-loop.c: 3 additions & 6 deletions

@@ -489,17 +489,14 @@ bool qemu_aio_wait(void)
 void qemu_aio_set_fd_handler(int fd,
                              IOHandler *io_read,
                              IOHandler *io_write,
-                             AioFlushHandler *io_flush,
                              void *opaque)
 {
-    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, io_flush,
-                       opaque);
+    aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque);
 }
 #endif
 
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
-                                 EventNotifierHandler *io_read,
-                                 AioFlushEventNotifierHandler *io_flush)
+                                 EventNotifierHandler *io_read)
 {
-    aio_set_event_notifier(qemu_aio_context, notifier, io_read, io_flush);
+    aio_set_event_notifier(qemu_aio_context, notifier, io_read);
 }