async: remove unnecessary inc/dec pairs
Pull the increment/decrement pair out of aio_bh_poll and into the callers.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170213135235.12274-18-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
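To see the shape of the refactoring in isolation, here is a minimal, self-contained C sketch (the names ListRef, list_ref_inc/list_ref_dec, poll_bottom_halves and dispatch_handlers are invented for illustration and are not QEMU APIs; the real code uses qemu_lockcnt on ctx->list_lock). The point is the same as in the diff below: the reference-count pair moves out of the poll routine and into the caller, so a single inc/dec covers both the bottom-half poll and the handler dispatch.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for the list_lock reader count (not QEMU's lockcnt). */
typedef struct {
    atomic_int count;
} ListRef;

static void list_ref_inc(ListRef *r) { atomic_fetch_add(&r->count, 1); }
static void list_ref_dec(ListRef *r) { atomic_fetch_sub(&r->count, 1); }

/* After the change, the walks assume the caller already holds a
 * reference and leave the count untouched, mirroring the new
 * aio_bh_poll() contract documented in util/async.c below. */
static void poll_bottom_halves(ListRef *r)
{
    (void)r;
    puts("walk bottom halves");
}

static void dispatch_handlers(ListRef *r)
{
    (void)r;
    puts("walk fd handlers");
}

int main(void)
{
    ListRef r = { .count = 0 };

    /* One inc/dec pair in the caller now protects both walks, instead
     * of each walk taking and dropping its own reference internally. */
    list_ref_inc(&r);
    poll_bottom_halves(&r);
    dispatch_handlers(&r);
    list_ref_dec(&r);
    return 0;
}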
parent a153bf52b3
commit bd451435c0
util/aio-posix.c
@@ -425,9 +425,8 @@ static bool aio_dispatch_handlers(AioContext *ctx)
 
 void aio_dispatch(AioContext *ctx)
 {
-    aio_bh_poll(ctx);
-
     qemu_lockcnt_inc(&ctx->list_lock);
+    aio_bh_poll(ctx);
     aio_dispatch_handlers(ctx);
     qemu_lockcnt_dec(&ctx->list_lock);
 
@@ -679,16 +678,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     npfd = 0;
-    qemu_lockcnt_dec(&ctx->list_lock);
 
     progress |= aio_bh_poll(ctx);
 
     if (ret > 0) {
-        qemu_lockcnt_inc(&ctx->list_lock);
         progress |= aio_dispatch_handlers(ctx);
-        qemu_lockcnt_dec(&ctx->list_lock);
     }
 
+    qemu_lockcnt_dec(&ctx->list_lock);
+
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
     return progress;
util/aio-win32.c
@@ -253,8 +253,6 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     bool progress = false;
     AioHandler *tmp;
 
-    qemu_lockcnt_inc(&ctx->list_lock);
-
     /*
      * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
@@ -305,14 +303,15 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
         }
     }
 
-    qemu_lockcnt_dec(&ctx->list_lock);
     return progress;
 }
 
 void aio_dispatch(AioContext *ctx)
 {
+    qemu_lockcnt_inc(&ctx->list_lock);
     aio_bh_poll(ctx);
     aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+    qemu_lockcnt_dec(&ctx->list_lock);
     timerlistgroup_run_timers(&ctx->tlg);
 }
 
@@ -349,7 +348,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
         }
     }
 
-    qemu_lockcnt_dec(&ctx->list_lock);
     first = true;
 
     /* ctx->notifier is always registered.  */
@@ -392,6 +390,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
         progress |= aio_dispatch_handlers(ctx, event);
     } while (count > 0);
 
+    qemu_lockcnt_dec(&ctx->list_lock);
+
     progress |= timerlistgroup_run_timers(&ctx->tlg);
     return progress;
 }

util/async.c (12 changed lines)
@@ -90,15 +90,16 @@ void aio_bh_call(QEMUBH *bh)
     bh->cb(bh->opaque);
 }
 
-/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
+/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
+ * The count in ctx->list_lock is incremented before the call, and is
+ * not affected by the call.
+ */
 int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
     int ret;
     bool deleted = false;
 
-    qemu_lockcnt_inc(&ctx->list_lock);
-
     ret = 0;
     for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
         next = atomic_rcu_read(&bh->next);
@@ -123,11 +124,10 @@ int aio_bh_poll(AioContext *ctx)
 
     /* remove deleted bhs */
     if (!deleted) {
-        qemu_lockcnt_dec(&ctx->list_lock);
         return ret;
     }
 
-    if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
+    if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
@@ -138,7 +138,7 @@ int aio_bh_poll(AioContext *ctx)
                 bhp = &bh->next;
             }
         }
-        qemu_lockcnt_unlock(&ctx->list_lock);
+        qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
     }
     return ret;
 }
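A note on the util/async.c hunks above (my reading of the change, not wording from the commit itself): because the caller now owns the reference on ctx->list_lock, aio_bh_poll has to leave the count exactly as it found it, which is what the new comment states. The deleted-BH cleanup path therefore switches from qemu_lockcnt_dec_and_lock/qemu_lockcnt_unlock, which would consume the caller's reference, to qemu_lockcnt_dec_if_lock paired with qemu_lockcnt_inc_and_unlock, which hands the reference back before returning. Continuing the toy sketch from the top (again with invented names, not QEMU's lockcnt API), the pattern looks roughly like this:

/* Borrow-and-return: the caller's reference may be consumed briefly
 * to prune deleted entries under exclusion, but it is restored before
 * returning, so the routine is count-neutral overall. */
static void poll_and_prune(ListRef *r)
{
    puts("walk bottom halves");

    list_ref_dec(r);               /* borrow the caller's reference...     */
    puts("prune deleted entries");
    list_ref_inc(r);               /* ...and hand it back before returning */
}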