
* Update to QEMU v9.0.0

---------

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
Signed-off-by: Zheyu Ma <zheyuma97@gmail.com>
Signed-off-by: Ido Plat <ido.plat@ibm.com>
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
Signed-off-by: Gregory Price <gregory.price@memverge.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Lorenz Brun <lorenz@brun.one>
Signed-off-by: Yao Xingtao <yaoxt.fnst@fujitsu.com>
Signed-off-by: Arnaud Minier <arnaud.minier@telecom-paris.fr>
Signed-off-by: Inès Varhol <ines.varhol@telecom-paris.fr>
Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Signed-off-by: Joonas Kankaala <joonas.a.kankaala@gmail.com>
Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Signed-off-by: Stefan Weil <sw@weilnetz.de>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Signed-off-by: Glenn Miles <milesg@linux.ibm.com>
Signed-off-by: Oleg Sviridov <oleg.sviridov@red-soft.ru>
Signed-off-by: Artem Chernyshev <artem.chernyshev@red-soft.ru>
Signed-off-by: Yajun Wu <yajunw@nvidia.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Pierre-Clément Tosi <ptosi@google.com>
Signed-off-by: Lei Wang <lei4.wang@intel.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Martin Hundebøll <martin@geanix.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
Signed-off-by: Wafer <wafer@jaguarmicro.com>
Signed-off-by: Yuxue Liu <yuxue.liu@jaguarmicro.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Nguyen Dinh Phi <phind.uet@gmail.com>
Signed-off-by: Zack Buhman <zack@buhman.org>
Signed-off-by: Keith Packard <keithp@keithp.com>
Signed-off-by: Yuquan Wang <wangyuquan1236@phytium.com.cn>
Signed-off-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
Signed-off-by: Cindy Lu <lulu@redhat.com>
Co-authored-by: Peter Maydell <peter.maydell@linaro.org>
Co-authored-by: Fabiano Rosas <farosas@suse.de>
Co-authored-by: Peter Xu <peterx@redhat.com>
Co-authored-by: Thomas Huth <thuth@redhat.com>
Co-authored-by: Cédric Le Goater <clg@redhat.com>
Co-authored-by: Zheyu Ma <zheyuma97@gmail.com>
Co-authored-by: Ido Plat <ido.plat@ibm.com>
Co-authored-by: Ilya Leoshkevich <iii@linux.ibm.com>
Co-authored-by: Markus Armbruster <armbru@redhat.com>
Co-authored-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Co-authored-by: Paolo Bonzini <pbonzini@redhat.com>
Co-authored-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Co-authored-by: David Hildenbrand <david@redhat.com>
Co-authored-by: Kevin Wolf <kwolf@redhat.com>
Co-authored-by: Stefan Reiter <s.reiter@proxmox.com>
Co-authored-by: Fiona Ebner <f.ebner@proxmox.com>
Co-authored-by: Gregory Price <gregory.price@memverge.com>
Co-authored-by: Lorenz Brun <lorenz@brun.one>
Co-authored-by: Yao Xingtao <yaoxt.fnst@fujitsu.com>
Co-authored-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Co-authored-by: Arnaud Minier <arnaud.minier@telecom-paris.fr>
Co-authored-by: BALATON Zoltan <balaton@eik.bme.hu>
Co-authored-by: Igor Mammedov <imammedo@redhat.com>
Co-authored-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Co-authored-by: Richard Henderson <richard.henderson@linaro.org>
Co-authored-by: Sven Schnelle <svens@stackframe.org>
Co-authored-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Co-authored-by: Helge Deller <deller@kernel.org>
Co-authored-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Co-authored-by: Benjamin Gray <bgray@linux.ibm.com>
Co-authored-by: Nicholas Piggin <npiggin@gmail.com>
Co-authored-by: Avihai Horon <avihaih@nvidia.com>
Co-authored-by: Michael Tokarev <mjt@tls.msk.ru>
Co-authored-by: Joonas Kankaala <joonas.a.kankaala@gmail.com>
Co-authored-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Co-authored-by: Stefan Weil <sw@weilnetz.de>
Co-authored-by: Dayu Liu <liu.dayu@zte.com.cn>
Co-authored-by: Zhao Liu <zhao1.liu@intel.com>
Co-authored-by: Glenn Miles <milesg@linux.vnet.ibm.com>
Co-authored-by: Artem Chernyshev <artem.chernyshev@red-soft.ru>
Co-authored-by: Yajun Wu <yajunw@nvidia.com>
Co-authored-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Co-authored-by: Pierre-Clément Tosi <ptosi@google.com>
Co-authored-by: Wei Wang <wei.w.wang@intel.com>
Co-authored-by: Martin Hundebøll <martin@geanix.com>
Co-authored-by: Michael S. Tsirkin <mst@redhat.com>
Co-authored-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
Co-authored-by: Wafer <wafer@jaguarmicro.com>
Co-authored-by: lyx634449800 <yuxue.liu@jaguarmicro.com>
Co-authored-by: Gerd Hoffmann <kraxel@redhat.com>
Co-authored-by: Nguyen Dinh Phi <phind.uet@gmail.com>
Co-authored-by: Zack Buhman <zack@buhman.org>
Co-authored-by: Keith Packard <keithp@keithp.com>
Co-authored-by: Yuquan Wang <wangyuquan1236@phytium.com.cn>
Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com>
Co-authored-by: Cindy Lu <lulu@redhat.com>
/*
 * QEMU Hyper-V Dynamic Memory Protocol driver
 *
 * Copyright (C) 2020-2023 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hv-balloon-internal.h"
#include "hv-balloon-our_range_memslots.h"
#include "trace.h"

/* OurRange */
static void our_range_init(OurRange *our_range, uint64_t start, uint64_t count)
{
    assert(count <= UINT64_MAX - start);
    our_range->range.start = start;
    our_range->range.count = count;

    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);

    /* mark the whole range as unused but for potential use */
    our_range->added = 0;
    our_range->unusable_tail = 0;
}

static void our_range_destroy(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
}

void hvb_our_range_clear_removed_trees(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);
}

void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size)
{
    assert(additional_size <= UINT64_MAX - our_range->added);

    our_range->added += additional_size;

    assert(our_range->added <= UINT64_MAX - our_range->unusable_tail);
    assert(our_range->added + our_range->unusable_tail <=
           our_range->range.count);
}

/* OurRangeMemslots */
static void our_range_memslots_init_slots(OurRangeMemslots *our_range,
                                          MemoryRegion *backing_mr,
                                          Object *memslot_owner)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t memslot_offset;

    assert(memslots->count > 0);
    memslots->slots = g_new0(MemoryRegion, memslots->count);

    /* Initialize our memslots, but don't map them yet. */
    assert(memslots->size_each > 0);
    for (idx = 0, memslot_offset = 0; idx < memslots->count;
         idx++, memslot_offset += memslots->size_each) {
        uint64_t memslot_size;
        g_autofree char *name = NULL;

        /* The size of the last memslot might be smaller. */
        if (idx == memslots->count - 1) {
            uint64_t region_size;

            assert(our_range->mr);
            region_size = memory_region_size(our_range->mr);
            memslot_size = region_size - memslot_offset;
        } else {
            memslot_size = memslots->size_each;
        }

        name = g_strdup_printf("memslot-%u", idx);
        memory_region_init_alias(&memslots->slots[idx], memslot_owner, name,
                                 backing_mr, memslot_offset, memslot_size);
        /*
         * We want to be able to atomically and efficiently activate/deactivate
         * individual memslots without affecting adjacent memslots in memory
         * notifiers.
         */
        memory_region_set_unmergeable(&memslots->slots[idx], true);
    }

    memslots->mapped_count = 0;
}

OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size)
{
    OurRangeMemslots *our_range;

    our_range = g_malloc(sizeof(*our_range));
    our_range_init(&our_range->range,
                   addr / HV_BALLOON_PAGE_SIZE,
                   memory_region_size(parent_mr) / HV_BALLOON_PAGE_SIZE);
    our_range->slots.size_each = memslot_size;
    our_range->slots.count = memslot_count;
    our_range->mr = parent_mr;
    our_range_memslots_init_slots(our_range, backing_mr, memslot_owner);

    return our_range;
}

static void our_range_memslots_free_memslots(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t offset;

    memory_region_transaction_begin();
    for (idx = 0, offset = 0; idx < memslots->mapped_count;
         idx++, offset += memslots->size_each) {
        trace_hv_balloon_unmap_slot(idx, memslots->count, offset);
        assert(memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_del_subregion(our_range->mr, &memslots->slots[idx]);
    }
    memory_region_transaction_commit();

    for (idx = 0; idx < memslots->count; idx++) {
        object_unparent(OBJECT(&memslots->slots[idx]));
    }

    g_clear_pointer(&our_range->slots.slots, g_free);
}

void hvb_our_range_memslots_free(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    MemoryRegion *hostmem_mr;
    RAMBlock *rb;

    assert(our_range->slots.count > 0);
    assert(our_range->slots.slots);

    hostmem_mr = memslots->slots[0].alias;
    rb = hostmem_mr->ram_block;
    ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));

    our_range_memslots_free_memslots(our_range);
    our_range_destroy(&our_range->range);
    g_free(our_range);
}

void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    uint64_t total_map_size;
    unsigned int idx;
    uint64_t offset;

    total_map_size = (our_range->range.added + additional_map_size) *
        HV_BALLOON_PAGE_SIZE;
    idx = memslots->mapped_count;
    assert(memslots->size_each > 0);
    offset = idx * memslots->size_each;

    /*
     * Activate all memslots covered by the newly added region in a single
     * transaction.
     */
    memory_region_transaction_begin();
    for ( ; idx < memslots->count;
          idx++, offset += memslots->size_each) {
        /*
         * If this memslot starts beyond or at the end of the range to map,
         * so does every next one.
         */
        if (offset >= total_map_size) {
            break;
        }

        /*
         * Instead of enabling/disabling memslots, we add/remove them. This
         * should make address space updates faster, because we don't have to
         * loop over many disabled subregions.
         */
        trace_hv_balloon_map_slot(idx, memslots->count, offset);
        assert(!memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_add_subregion(our_range->mr, offset,
                                    &memslots->slots[idx]);

        memslots->mapped_count++;
    }
    memory_region_transaction_commit();
}
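For context, here is a minimal sketch of how a caller (such as the hv-balloon device model) might drive these helpers. It is not part of the original file: the function names, the 128 MiB memslot size, and the caller-provided regions are illustrative assumptions; the call ordering follows the arithmetic in hvb_our_range_memslots_ensure_mapped_additional(), which maps enough memslots to cover range.added plus the not-yet-recorded additional pages.

/*
 * Illustrative usage sketch (assumptions, not from the original file).
 */
static OurRangeMemslots *example_hot_add_setup(uint64_t addr,
                                               MemoryRegion *parent_mr,
                                               MemoryRegion *backing_mr,
                                               Object *owner)
{
    uint64_t memslot_size = 128 * 1024 * 1024;   /* assumed 128 MiB slots */
    uint64_t region_size = memory_region_size(parent_mr);
    unsigned int count = (region_size + memslot_size - 1) / memslot_size;

    /* Carve the hot-add region into unmapped alias memslots. */
    return hvb_our_range_memslots_new(addr, parent_mr, backing_mr,
                                      owner, count, memslot_size);
}

static void example_accept_pages(OurRangeMemslots *our_range,
                                 uint64_t additional_pages)
{
    /*
     * First map only as many memslots as the grown range needs, then record
     * the newly accepted pages in the range itself.
     */
    hvb_our_range_memslots_ensure_mapped_additional(our_range,
                                                    additional_pages);
    hvb_our_range_mark_added(&our_range->range, additional_pages);
}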