Page not found :(
The page you are looking for doesn't exist or has been moved.
diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index dad6b582..00000000 --- a/.editorconfig +++ /dev/null @@ -1,11 +0,0 @@ -# editorconfig.org - -root = true - -[*] -indent_style = space -indent_size = 2 -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true \ No newline at end of file diff --git a/.eslintignore b/.eslintignore deleted file mode 100644 index 57d00579..00000000 --- a/.eslintignore +++ /dev/null @@ -1,4 +0,0 @@ -assets/js/index.js -assets/js/katex.js -assets/js/vendor -node_modules \ No newline at end of file diff --git a/.eslintrc.json b/.eslintrc.json deleted file mode 100644 index c926994d..00000000 --- a/.eslintrc.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "env": { - "browser": true, - "commonjs": true, - "es6": true, - "node": true - }, - "extends": "eslint:recommended", - "globals": { - "Atomics": "readonly", - "SharedArrayBuffer": "readonly" - }, - "parserOptions": { - "ecmaVersion": 2018, - "sourceType": "module" - }, - "rules": { - "no-console": 0, - "quotes": ["error", "single"], - "comma-dangle": [ - "error", - { - "arrays": "always-multiline", - "objects": "always-multiline", - "imports": "always-multiline", - "exports": "always-multiline", - "functions": "ignore" - } - ] - } -} \ No newline at end of file diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100644 index b15c6e6b..00000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1,12 +0,0 @@ -# These are supported funding model platforms - -github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] -patreon: # Replace with a single Patreon username -open_collective: doks # Replace with a single Open Collective username -ko_fi: # Replace with a single Ko-fi username -tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel -community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry -liberapay: # Replace with a single 
Liberapay username -issuehunt: # Replace with a single IssueHunt username -otechie: # Replace with a single Otechie username -custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/.github/ISSUE_TEMPLATE/bug-report---.md b/.github/ISSUE_TEMPLATE/bug-report---.md deleted file mode 100644 index 6a8b3a84..00000000 --- a/.github/ISSUE_TEMPLATE/bug-report---.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -name: "Bug report \U0001F41E" -about: Create a report to help us improve - ---- - -## Description - -Describe the issue that you're seeing. - -### Steps to reproduce - -Clear steps describing how to reproduce the issue. Please please please link to a demo project if possible, this makes your issue _much_ easier to diagnose (seriously). - -### Expected result - -What should happen? - -### Actual result - -What happened. - -### Environment - -Paste the information here as shown by `npm run check` diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index e3766187..00000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,4 +0,0 @@ -contact_links: - - name: Question 🙋 - url: https://github.com/h-enk/doks/discussions/categories/q-a - about: Ask your question in Doks Discussions \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature-request---.md b/.github/ISSUE_TEMPLATE/feature-request---.md deleted file mode 100644 index 74da274c..00000000 --- a/.github/ISSUE_TEMPLATE/feature-request---.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -name: "Feature request \U0001F680" -about: Suggest an idea for Doks - ---- - -## Summary - -Brief explanation of the feature. - -### Basic example - -Include a basic example or links here. - -### Motivation - -Why are we doing this? What use cases does it support? What is the expected outcome? 
diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 8abca405..00000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,11 +0,0 @@ -# To get started with Dependabot version updates, you'll need to specify which -# package ecosystems to update and where the package manifests are located. -# Please see the documentation for all configuration options: -# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates - -version: 2 -updates: - - package-ecosystem: "npm" # See documentation for possible values - directory: "/" # Location of package manifests - schedule: - interval: "daily" diff --git a/.github/workflows/dco.yaml b/.github/workflows/dco.yaml deleted file mode 100644 index 531eaf55..00000000 --- a/.github/workflows/dco.yaml +++ /dev/null @@ -1,18 +0,0 @@ -name: DCO -on: - pull_request: -jobs: - check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.x - uses: actions/setup-python@v1 - with: - python-version: '3.x' - - name: Check DCO - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - pip3 install -U dco-check - dco-check -e "49699333+dependabot[bot]@users.noreply.github.com" diff --git a/.github/workflows/deploy-github.yml b/.github/workflows/deploy-github.yml deleted file mode 100644 index aca2b21c..00000000 --- a/.github/workflows/deploy-github.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: GitHub Pages - -on: - push: - branches: - - main - -jobs: - deploy: - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 - with: - node-version: '20' - cache: 'npm' - - - name: Install dependencies - run: npm install - -# - name: Check for linting errors -# run: npm test - - - name: Build production website - run: npm run build - - - name: Deploy to GitHub Pages - uses: peaceiris/actions-gh-pages@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./public diff --git 
a/.github/workflows/node.js-ci.yml b/.github/workflows/node.js-ci.yml deleted file mode 100644 index f692a7a5..00000000 --- a/.github/workflows/node.js-ci.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Cloud Hypervisor Website Test - -on: - push: - branches: main - pull_request: - branches: main - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - name: Check out - uses: actions/checkout@v2 - - - name: Set up Node.js ${{ matrix.node }} - uses: actions/setup-node@v2 - with: - node-version: ${{ matrix.node }} - - - name: Install dependencies - run: npm ci - -# - name: Run test script -# run: npm test - - - name: Build production website - run: npm run build diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 8f187f04..00000000 --- a/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -node_modules -public -resources -# Local Netlify folder -.netlify -TODO \ No newline at end of file diff --git a/.markdownlint.json b/.markdownlint.json deleted file mode 100644 index a8b885d0..00000000 --- a/.markdownlint.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "comment": "Hyas rules", - - "default": true, - "line_length": false, - "no-inline-html": false, - "no-trailing-punctuation": false, - "no-duplicate-heading": false, - "no-bare-urls": false -} \ No newline at end of file diff --git a/.markdownlintignore b/.markdownlintignore deleted file mode 100644 index a0380d65..00000000 --- a/.markdownlintignore +++ /dev/null @@ -1,3 +0,0 @@ -node_modules -CHANGELOG.md -README.md \ No newline at end of file diff --git a/assets/fonts/.gitkeep b/.nojekyll similarity index 100% rename from assets/fonts/.gitkeep rename to .nojekyll diff --git a/.stylelintignore b/.stylelintignore deleted file mode 100644 index 3972095a..00000000 --- a/.stylelintignore +++ /dev/null @@ -1,3 +0,0 @@ -assets/scss/components/_syntax.scss -assets/scss/vendor -node_modules \ No newline at end of file diff --git a/.stylelintrc.json b/.stylelintrc.json deleted file mode 100644 index 1490802c..00000000 --- 
a/.stylelintrc.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "extends": "stylelint-config-standard", - "rules": { - "no-empty-source": null, - "string-quotes": "double", - "at-rule-no-unknown": [ - true, - { - "ignoreAtRules": [ - "extend", - "at-root", - "debug", - "warn", - "error", - "if", - "else", - "for", - "each", - "while", - "mixin", - "include", - "content", - "return", - "function", - "tailwind", - "apply", - "responsive", - "variants", - "screen" - ] - } - ] - } -} \ No newline at end of file diff --git a/404.html b/404.html new file mode 100644 index 00000000..ea49f859 --- /dev/null +++ b/404.html @@ -0,0 +1,5 @@ +
The page you are looking for doesn't exist or has been moved.
-
-
-
-
- Doks is a Hugo theme for building secure, fast, and SEO-ready documentation websites, which you can easily update and customize. -
- -
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Posted May 19, 2022 by Sebastien Boeuf ‐ 7 min read
There are use cases for which a workload needs to access specific hardware such +as accelerators, GPU or network adapters to maximise potential performance. And +since these workloads run inside virtual machines (VM) for security reasons, the +challenge is to make this hardware available from within the VM, but without +degrading the performance that can be achieved from bare metal.
This is a mature framework allowing a PCI device to be bound to the vfio-pci
+driver instead of the default driver it is usually attached to.
It exposes an ioctl interface so that a userspace program can interact with the +device and retrieve all the information it needs. In the virtualization case, +this userspace program is the Virtual Machine Monitor (VMM), Cloud Hypervisor +for instance, which after it got all the information from the device, exposes +them to the guest. And that is how the workload can access the device inside the +VM, exactly the same way it would if it was directly running on bare metal.
Now that we’ve covered the functional aspect, let’s have a look at the way we +can achieve bare metal performance from the guest.
The main idea for not degrading performance is to avoid as much as possible to +trigger VM exits when the workload interacts with the device, as well as when +it receives interrupts from it.
First, we can look at the PCI Base Address Registers (BAR). We need to prevent +a BAR access from generating a VM exit. This is done by mapping the device +region directly into the VMM virtual address space, and make this same region a +user memory region at the hypervisor level. This means we tell KVM to inform +the CPU it can find the pages related to this region directly at a specific +address in physical RAM. Once this is properly configured, any memory access +from guest workload to one of the device’s PCI BARs will result in a direct +access to physical RAM from the CPU as it can find the pages from the Extended +Page Table (EPT).
There’s one small part of the BARs for which we can’t avoid VM exits, the MSI-X +tables. MSI-X specification expects the vector table and pending bit array to be +part of the device BARs. But the VMM needs to trap every access to these tables +since it’s responsible for configuring the interrupts both at the KVM and VFIO +levels. This is important to note this doesn’t impact the performance of the +device when being used by the guest. The interrupt vectors are only configured +during the initialization of the device driver, which is already completed at +the time the workload interacts with the device.
The second aspect, the main one actually, is how Direct Memory Access (DMA) is +performed securely without generating any VM exit. DMA is the standard way for +a device to access physical RAM without involving CPU cycles, which allows the +device to achieve very high performance. In the context of virtualization, we +want to reuse this mechanism, but without allowing a malicious guest to use this +device to reach every address from physical RAM. That’s where the IOMMU comes +into play, as the gatekeeper of RAM accesses from any device on the machine. +The VMM is responsible for configuring the IOMMU through the VFIO interface. It +provides the set of pages that can be accessed from a specific device, which +usually means the entire guest RAM. This information is stored in the DMA +Remapping (DMAR) table as part of the IOMMU. Whenever the workload initiates a +DMA transfer between the device and the guest RAM, the IOMMU will allow such +access, but if the address is outside the authorized range, the IOMMU will +prevent the transfer from happening. That’s how security is enforced in the +virtualization context.
Since DMA doesn’t involve CPU to access the memory, a DMA transfer doesn’t +trigger any VM exit, providing identical performance to what can be observed +on bare metal.
Third and last aspect, the interrupts. Depending on the type of device, we can +see a lot of interrupts being generated, which can trigger a large amount of VM +exits, affecting directly the performance of the device inside the VM.
The way to avoid such disruption is by relying on fairly recent hardware to +leverage a feature called Posted Interrupts (PI). This is both part of the CPU +with the virtual APIC (APICv) and the IOMMU through the Interrupt Remapping (IR) +table. When the hardware supports it, the hypervisor on behalf of the VMM will +configure the IOMMU by adding new entries to the IR table. Later on, whenever +an interrupt is triggered, the IOMMU will check the IR table to find out the PI +descriptor associated with it. It then triggers the correct interrupt inside the +guest relying on the APICv. This whole chain of events is entirely handled in +hardware, meaning there’s no VM exit involved, which doesn’t lead to any drop +in performance.
For more details on how to use VFIO with Cloud Hypervisor, refer to the +following documentation.
vDPA stands for virtio Data Path Acceleration, and the main reason this new +framework exists is its ability to simplify the migration process. The whole +concept behind vDPA relies on the virtio specification, which is what makes it +more suited for migration.
On the one hand, the virtio control path is used for configuring the virtqueues
+and getting information about the device. This is achieved through the ioctl
+interface that is accessible through the device node /dev/vhost-vdpa. The
+userspace program, the VMM again, retrieves all the information it needs so it
+can expose the correct type of virtio device to the guest, with the right amount
+of queues, the size of each queue, and the virtio configuration associated with.
On the other hand, the virtio data path, effectively the virtqueues themselves, +is used to transfer the data between frontend in the guest and backend from the +host. But in this very special case, the virtqueues are directly part of the +physical device, allowing DMA transfers to be initiated by the guest. And the +same way it works with VFIO because the device is also attached to a physical +IOMMU, the guest can perform secure and efficient memory accesses from the +device.
Providing a simpler path for migrating physical devices from one VM to another +is one of the main reasons vDPA was created. Because of the design relying on +virtqueues, most of the migration complexity can be handled by the VMM, without +the need for the vDPA device to provide an opaque blob of data that will have to +be restored on the destination VM. That’s where it differentiates from the VFIO +approach, which will require every vendor to implement the VFIO migration API as +part of their driver, providing an opaque blob specific to each device.
It’s important to note that VFIO migration API is very recent and not heavily +tested and deployed, meaning you might want to wait for a bit before jumping +to it.
For vDPA, which is a very recent addition to the Linux kernel, the migration +isn’t fully figured out yet by the maintainers since it still requires some +extra support through the virtio specification that will allow to stop a device +before it can be migrated. +There’s an alternative approach implemented in software part of the Data Plane +Development Kit (DPDK). It shadows the virtqueues to have complete knowledge of +what is happening inside the queues, and therefore at any point in time it can +stop the device and start migrating.
For more details on how to use vDPA with Cloud Hypervisor, refer to the +following documentation.
Posted September 18, 2020 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the 0.10.0 project.
Highlights for cloud-hypervisor version 0.10.0 include:
virtio-block Support for Multiple Descriptors Some virtio-block device drivers may generate requests with multiple descriptors and support has been added for those drivers.
Support has been added for fine grained control of memory allocation for the guest. This includes controlling the backing of sections of guest memory, assigning to specific host NUMA nodes and assigning memory and vCPUs to specific memory nodes inside the guest. Full details of this can be found in the memory documentation.
Seccomp Sandbox Improvements All the remaining threads and devices are now isolated within their own seccomp filters. This provides a layer of sandboxing and enhances the security model of cloud-hypervisor.
A new option (kvm_hyperv) has been added to --cpus to provide an option to toggle on KVM’s HyperV emulation support. This enables progress towards booting Windows without adding extra emulated devices.
ch-remote to resize the VM parameter now accepts the standard size suffixes (#1596)cloud-hypervisor no longer panics when started with --memory hotplug_method=virtio-mem and no hotplug_size (#1564)--memory hotplug_method=virtio-mem (#1593)--version shows the version for released binaries (#1669)virtio devices are now printed out (#1551)
See the GitHub Release for the release assets.
Posted October 29, 2020 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked through the 0.11.0 project.
Highlights for cloud-hypervisor version 0.11.0 include:
io_uring support by default for virtio-block Provided that the host OS supports it (Linux kernel 5.8+) then io_uring will
+be used for a significantly higher performance block device.
This is the first release where we officially support Windows running as a +guest. Full details of how to setup the image and run Cloud Hypervisor with a +Windows guest can be found in the dedicated Windows +documentation.
vhost-user “Self Spawning” Deprecation Automatically spawning a vhost-user-net or vhost-user-block backend is now
+deprecated. Users of this functionality will receive a warning and should make
+adjustments. The functionality will be removed in the next release.
virtio-mmio Removal Support for using the virtio-mmio transport, rather than using PCI, has been
+removed. This has been to simplify the code and significantly
+reduce the testing burden of the project.
When running on the ARM64 architecture snapshot and restore has now been +implemented.
The time to boot the Linux kernel has been significantly improved by +identifying some areas of delays around PCI bus probing, IOAPIC programming and +MPTABLE issues. Full details can be seen in #1728.
SIGTERM/SIGINT Interrupt Signal Handling When the VMM process receives the SIGTERM or SIGINT signals then it will
+trigger the VMM process to cleanly deallocate resources before exiting. The
+guest VM will not be cleanly shutdown but the VMM process will clean up its
+resources.
The default logging level was changed to include warnings which should make it +easier to see potential issues. New logging +documentation was also added.
--balloon Parameter Added Control of the setup of virtio-balloon has been moved from --memory to its
+own dedicated parameter. This makes it easier to add more balloon specific
+controls without overloading --memory.
virtio-watchdog Support Support for using a new virtio-watchdog has been added which can be used to
+have the VMM reboot the guest if the guest userspace fails to ping the
+watchdog. This is enabled with --watchdog and requires kernel support.
CMD.EXE under Windows SAC (#1170)virtio-pmem with discard_writes=on no longer marks the guest memory as
+read only so avoids excessive VM exits (#1795)Many thanks to everyone who has contributed to our 0.11.0 release including some new faces.
See the GitHub Release for the release assets.
Posted December 10, 2020 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked through the 0.12.0 project.
Highlights for cloud-hypervisor version 0.12.0 include:
The use of --watchdog is now fully supported as is the ability to reboot the
+VM from within the guest when running Cloud Hypervisor on an ARM64 system.
vhost-user-net and vhost-user-block self spawning In order to use vhost-user-net or vhost-user-block backends the user is now
+responsible for starting the backend and providing the socket for the VMM to
+use. This functionality was deprecated in the last release and has now been
+removed.
vhost-user-fs backend The vhost-user-fs backend is no longer included in Cloud Hypervisor and it is
+instead hosted in its own
+repository
The vm.info HTTP API endpoint has been extended to include the details of the
+devices used by the VM including any VFIO devices used.
Many thanks to everyone who has contributed to our 0.12.0 release:
See the GitHub Release for the release assets.
Posted February 12, 2021 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the 0.13.0 project.
Highlights for cloud-hypervisor version 0.13.0 include:
It is now possible to use Cloud Hypervisor’s VFIO support to passthrough PCI +devices that do not support MSI or MSI-X and instead rely on INTx interrupts. +Most notably this widens the support to most NVIDIA cards with the proprietary +drivers.
Through the addition of hugepage_size on --memory it is now possible to
+specify the desired size of the huge pages used when allocating the guest
+memory. The user is required to ensure they have sufficient pages of the
+desired size in their pool.
It is now possible to provide file descriptors using the fd parameter to
+--net which point at TAP devices that have already been opened by the user.
+This aids integration with libvirt but also permits the use of MACvTAP
+support. This is documented in dedicated macvtap documentation.
It is now possible to use VHD (fixed) disk images as well as QCOWv2 and raw +disk image with Cloud Hypervisor.
Device threads are now derived from the main VMM thread which allows more +restrictive seccomp filters to be applied to them. The threads also have a +predictable name derived from the device id.
It is now possible to request that the guest VM shut itself down by triggering
+a synthetic ACPI power button press from the VMM. If the guest is listening for
+such an event (e.g. using systemd) then it will process the event and cleanly
+shut down. This functionality is exposed through the HTTP API and can be
+triggered via ch-remote --api-socket=<API socket> power-button.
Many thanks to everyone who has contributed to our 0.13.0 release including +some new faces.
See the GitHub Release for the release assets.
Posted March 26, 2021 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the 0.14.0 project.
Highlights for cloud-hypervisor version 0.14.0 include:
A new option was added to the VMM --event-monitor which reports structured
+events (JSON) over a file or file descriptor at key events in the lifecycle of
+the VM. The list of events is limited at the moment but will be further
+extended over subsequent releases. The events exposed form part of the Cloud
+Hypervisor API surface.
Basic support has been added for running Windows guests atop the MSHV +hypervisor as an alternative to KVM and further improvements have been made to +the MSHV support.
The aarch64 platform has been enhanced with more devices exposed to the running +VM including an enhanced serial UART.
The documentation for the hotplug support has been updated to reflect the use
+of the ch-remote tool and to include details of virtio-mem based hotplug as
+well as documenting hotplug of paravirtualised and VFIO devices.
virtio-console The --serial and --console parameters can now direct the console to a PTY
+allowing programmatic control of the console from another process through the
+PTY subsystem.
The block device performance can now be constrained as part of the VM +configuration allowing rate limiting. Full details of the controls are in the +IO throttling documentation.
Deprecated features will be removed in a subsequent release and users should plan to use alternatives
bzImage
+binaries has been deprecated. When using direct boot users should configure
+their kernel with CONFIG_PVH=y.Many thanks to everyone who has contributed to our 0.14.0 release including +some new faces.
See the GitHub Release for the release assets.
Posted March 31, 2021 by Cloud Hypervisor Team ‐ 1 min read
Bug fix release branched off the v0.14.0 release. The following bugs +were fixed in this release:
See the GitHub Release for the release assets.
Posted September 10, 2019 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the 0.2.0 project.
Highlights for cloud-hypervisor version 0.2.0 include:
As part of our general effort to offload paravirtualized I/O to external
+processes, we added support for
+vhost-user-net backends. This
+enables cloud-hypervisor users to plug a vhost-user based networking device
+(e.g. DPDK) into the VMM as their virtio network backend.
In order to properly implement guest reset and shutdown, we implemented +a minimal version of the hardware-reduced ACPI specification. Together with +a tiny I/O port based ACPI device, this allows cloud-hypervisor guests to +cleanly reboot and shut down.
+a minimal version of the hardware-reduced ACPI specification. Together with
+a tiny I/O port based ACPI device, this allows cloud-hypervisor guests to
+cleanly reboot and shutdown.
The ACPI implementation is a cloud-hypervisor build time option that is
+enabled by default.
Based on the Firecracker idea of using a dedicated I/O port to measure guest +boot times, we added support for logging guest events through the +0x80 +PC debug port. This allows, among other things, for granular guest boot time +measurements. See our debug port documentation +for more details.
We fixed a major performance issue with our initial VFIO implementation: When
+enabling VT-d through the KVM and VFIO APIs, our guest memory writes and reads
+were (in many cases) not cached. After correctly tagging the guest memory from
+cloud-hypervisor we’re now able to reach the expected performance from
+directly assigned devices.
We added shared memory region with DAX +support to our virtio-fs shared file system. +This provides better shared filesystem IO performance with a smaller guest +memory footprint.
Thanks to our simple KVM firmware +improvements, we are now able to boot Ubuntu bionic images. We added those to +our CI pipeline.
See the GitHub Release for the release assets.
Posted October 18, 2019 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the 0.3.0 project.
Highlights for cloud-hypervisor version 0.3.0 include:
We continue to work on offloading paravirtualized I/O to external processes, and we added support for vhost-user-blk backends.
+This enables cloud-hypervisor users to plug a vhost-user based block device like SPDK) into the VMM as their paravirtualized storage backend.
The previous release provided support for vhost-user-net backends. Now we also provide a TAP based vhost-user-net backend, implemented in Rust. Together with the vhost-user-net device implementation, this will eventually become the Cloud Hypervisor default paravirtualized networking architecture.
In order to more efficiently and securely communicate between host and guest, we added a hybrid implementation of the VSOCK socket address family over virtio. Credits go to the Firecracker project as our implementation is a copy of theirs.
In anticipation of the need to support asynchronous operations to Cloud Hypervisor guests (e.g. resources hotplug and guest migration), we added a HTTP based API to the VMM. The API will be more extensively documented during the next release cycle.
In order to support potential PCI-free use cases, we added support for the virtio MMIO transport layer. This will allow us to support simple, minimal guest configurations that do not require a PCI bus emulation.
As we want to improve our nested guests support, we added support for exposing a paravirtualized IOMMU device through virtio. This allows for a safer nested virtio and directly assigned devices support.
To add the IOMMU support, we had to make some CLI changes for Cloud Hypervisor users to be able to specify if devices had to be handled through this virtual IOMMU or not. In particular, the --disk option now expects disk paths to be prefixed with a path= string, and supports an optional iommu=[on|off]
+setting.
With the latest hypervisor firmware, we can now support the latest Ubuntu 19.10 (Eoan Ermine) cloud images.
After simplifying and changing our guest address space handling, we can now support guests with large amount of memory (more than 64GB).
See the GitHub Release for the release assets.
Posted December 13, 2019 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked through the 0.4.0 project.
Highlights for cloud-hypervisor version 0.4.0 include:
As a way to vertically scale Cloud-Hypervisor guests, we now support dynamically +adding virtual CPUs to the guests, a mechanism also known as CPU hot plug. +Through hardware-reduced ACPI notifications, Cloud Hypervisor can now add CPUs +to an already running guest and the high level operations for that process are +documented here
During the next release cycles we are planning to extend Cloud Hypervisor +hot plug framework to other resources, namely PCI devices and memory.
As part of the CPU hot plug feature enablement, and as a requirement for hot
+plugging other resources like devices or RAM, we added support for
+programmatically generating the needed ACPI tables. Through a dedicated
+acpi-tables crate, we now have a flexible and clean way of generating those
+tables based on the VMM device model and topology.
Our objective of running all Cloud Hypervisor paravirtualized I/O to a +vhost-user based framework is getting closer as we’ve added Rust based +implementations for vhost-user-blk and virtiofs backends. Together with the +vhost-user-net backend that came with the 0.3.0 release, this will form the +default Cloud Hypervisor I/O architecture.
As an initial requirement for enabling live migration, we added support for +pausing and resuming any VMM components. As an intermediate step towards live +migration, the upcoming guest snapshotting feature will be based on the pause +and resume capabilities.
As a way to simplify our device manager implementation, but also in order to +stay away from privileged rings as often as possible, any device that relies on +pin based interrupts will be using the userspace IOAPIC implementation by +default.
In order to allow for a more flexible device model, and also support guests +that would want to move PCI devices, we added support for PCI devices BAR +reprogramming.
cloud-hypervisor organization As we wanted to be more flexible on how we manage the Cloud Hypervisor project, +we decided to move it under a dedicated GitHub organization. +Together with the cloud-hypervisor +project, this new organization also now hosts our kernel +and firmware +repositories. We may also use it to host any rust-vmm that we’d need to +temporarily fork. +Thanks to GitHub’s seamless repository redirections, the move is completely +transparent to all Cloud Hypervisor contributors, users and followers.
Many thanks to everyone that contributed to the 0.4.0 release:
See the GitHub Release for the release assets.
Posted February 7, 2020 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the 0.5.0 project.
Highlights for cloud-hypervisor version 0.5.0 include:
With 0.4.0 we added support for CPU hot plug, and 0.5.0 adds CPU hot unplug and memory hot plug as well. This allows to dynamically resize Cloud Hypervisor guests which is needed for e.g. Kubernetes related use cases. +The memory hot plug implementation is based on the same framework as the CPU hot plug/unplug one, i.e. hardware-reduced ACPI notifications to the guest.
Next on our VM resizing roadmap is the PCI devices hotplug feature.
We enhanced our virtio networking and block support by having both devices use multiple I/O queues handled by multiple threads. This improves our default paravirtualized networking and block devices throughput.
We improved our interrupt management implementation by introducing an Interrupt Manager framework, based on the currently on-going rust-vmm vm-device crates discussions. This move made the code significantly cleaner, and allowed us to remove several KVM related dependencies from crates like the PCI and virtio ones.
In order to provide a better developer experience, we worked on improving our build, development and testing tools. +Somewhat similar to the excellent Firecracker’s devtool, we now provide a dev_cli script.
With this new tool, our users and contributors will be able to build and test Cloud Hypervisor through a containerized environment.
We spent some significant time and efforts debugging and fixing our integration with the Kata Containers project. Cloud Hypervisor is now a fully supported Kata Containers hypervisor, and is integrated into the project’s CI.
Many thanks to everyone that contributed to the 0.5.0 release:
See the GitHub Release for the release assets.
Posted March 20, 2020 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the 0.6.0 project.
Highlights for cloud-hypervisor version 0.6.0 include:
We continued our efforts around supporting dynamically changing the guest
+resources. After adding support for CPU and memory hotplug, Cloud Hypervisor
+now supports hot plugging and hot unplugging directly assigned (a.k.a. VFIO)
+devices into an already running guest. This closes the features gap for
+providing a complete Kata Containers workloads support with Cloud Hypervisor.
We enhanced our shared filesystem support through many virtio-fs improvements.
+By adding support for DAX, parallel processing of multiple requests, FS_IO,
+LSEEK and the MMIO virtio transport layer to our vhost_user_fs daemon, we
+improved our filesystem sharing performance, but also made it more stable and
+compatible with other virtio-fs implementations.
When choosing to offload the paravirtualized block and networking I/O to an
+external process (through the vhost-user protocol), Cloud Hypervisor now
+automatically spawns its default vhost-user-blk and vhost-user-net backends
+into their own, separate processes.
+This provides a seamless paravirtualized I/O user experience for those who want
+to run their guest I/O into separate execution contexts.
More and more Cloud Hypervisor services are exposed through the
+Rest API and thus only accessible via relatively cumbersome HTTP calls. In order
+to abstract those calls into a more user friendly tool, we created a Cloud
+Hypervisor Command Line Interface (CLI) called ch-remote.
+The ch-remote binary is created with each build and available e.g. at
+cloud-hypervisor/target/debug/ch-remote when doing a debug build.
Please check ch-remote --help for a complete description of all available
+commands.
In addition to the traditional Linux boot protocol, Cloud Hypervisor now +supports direct kernel booting through the PVH ABI.
With the 0.6.0 release, we are welcoming a few new contributors. Many thanks +to them and to everyone that contributed to this release:
See the GitHub Release for the release assets.
Posted April 30, 2020 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked through the 0.7.0 project.
Highlights for cloud-hypervisor version 0.7.0 include:
Further to our effort to support modifying a running guest we now support
+hotplug and unplug of the following virtio backed devices: block, network,
+pmem, virtio-fs and vsock. This functionality is available on the (default) PCI
+based transport and is exposed through the HTTP API. The ch-remote utility
+provides a CLI for adding or removing these device types after the VM has
+booted. Users can use the id parameter on the devices to choose names for
+devices to ease their removal.
libc Support Cloud Hypervisor can now be compiled with the musl C library and this release
+contains a static binary compiled using that toolchain.
vhost-user Backends The vhost-user backends for network and block support that are shipped by
+Cloud Hypervisor have been enhanced to support multiple threads and queues to
+improve throughput. These backends are used automatically if vhost_user=true
+is passed when the devices are created.
By passing the --initramfs command line option the user can specify a file to
+be loaded into the guest memory to be used as the kernel initial filesystem.
+This is usually used to allow the loading of drivers needed to be able to
+access the real root filesystem but it can also be used standalone for a very
+minimal image.
virtio-mem As well as supporting ACPI based hotplug Cloud Hypervisor now supports using
+the virtio-mem hotplug alternative. This can be controlled by the
+hotplug_method parameter on the --memory command line option. It currently
+requires kernel patches to be able to support it.
Seccomp Sandboxing Cloud Hypervisor now has support for restricting the system calls that the
+process can use via the seccomp security API. This is on by default and is
+controlled by the --seccomp command line option.
With the release of Ubuntu 20.04 we have added that to the list of supported +distributions and is part of our regular testing programme.
This is non exhaustive list of HTTP API and command line changes
id fields added for devices to allow them to be named to ease removal.
+If no name is specified the VMM chooses one.--memory’s shared and hugepages controls for determining backing
+memory instead of providing a path.--vsock parameter only takes one device as the Linux kernel only
+supports a single Vsock device. The REST API has removed the vector for this
+option and replaced it with a single optional field.vhost-user backed
+device.ch-remote has added add-disk, add-fs, add-net, add-pmem and
+add-vsock subcommands. For removal remove-device is used. The REST API
+has appropriate new HTTP endpoints too.size with --pmem is no longer required and instead the size
+will be obtained from the file. A discard_writes option has also been added
+to provide the equivalent of a read-only file.--block-backend have been changed to more closely align
+with those used by --disk.Many thanks to everyone who has contributed to our 0.7.0 release including some new faces.
See the GitHub Release for the release assets.
Posted June 11, 2020 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked through the 0.8.0 project.
Highlights for cloud-hypervisor version 0.8.0 include:
This release includes the first version of the snapshot and restore feature. +This allows a VM to be paused and then subsequently snapshotted. At a later +point that snapshot may be restored into a new running VM identical to the +original VM at the point it was paused.
This feature can be used for offline migration from one VM host to another, to +allow the upgrading or rebooting of the host machine transparently to the guest +or for templating the VM. This is an experimental feature and cannot be used on +a VM using passthrough (VFIO) devices. Issues with SMP have also been observed +(#1176).
Included in this release is experimental support for running on ARM64.
+Currently only virtio-mmio devices and a serial port are supported. Full
+details can be found in the ARM64 documentation.
If the host supports it the guest is now enabled for 5-level paging (aka LA57).
+This works when booting the Linux kernel with a vmlinux, bzImage or firmware
+based boot. However booting an ELF kernel built with CONFIG_PVH=y does not
+work due to current limitations in the PVH boot process.
With virtio-net and vhost-user-net devices the guest can suppress
+interrupts from the VMM by using the VIRTIO_RING_F_EVENT_IDX feature. This
+can lead to an improvement in performance by reducing the number of interrupts
+the guest must service.
vhost_user_fs Improvements The implementation in Cloud Hypervisor of the VirtioFS server now supports sandboxing itself with seccomp.
tap device ahead of creating the VM it is not required to
+run the cloud-hypervisor binary with CAP_NET_ADMIN (#1273).virtio-block or vhost-user-block now correctly adheres to
+the specification and synchronizes to the underlying filesystem as required
+based on guest feature negotiation. This avoids potential data loss (#399,
+#1216).MPTABLE. When compiled with the acpi feature the
+MPTABLE will no longer be generated (#1132).mmio builds
+(#751).This is non exhaustive list of HTTP API and command line changes:
socket
+rather than sock in some cases.ch-remote tool now shows any error message generated by the VMMwce parameter has been removed from --disk as the feature is always
+offered for negotiation.--net has gained a host_mac option that allows the setting of the MAC
+address for the tap device on the host.Many thanks to everyone who has contributed to our 0.8.0 release including some new faces.
See the GitHub Release for the release assets.
Posted August 6, 2020 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked through the 0.9.0 project.
Highlights for cloud-hypervisor version 0.9.0 include:
io_uring Based Block Device Support If the io_uring feature is enabled and the host kernel supports it then io_uring will be used for block devices. This results in a very significant performance improvement.
Statistics for activity of the virtio network and block devices is now exposed through a new vm.counters HTTP API entry point. These take the form of simple counters which can be used to observe the activity of the VM.
The HTTP API for adding devices now responds with the name that was assigned to the device as well the PCI BDF.
A topology parameter has been added to --cpus which allows the configuration of the guest CPU topology allowing the user to specify the numbers of sockets, packages per socket, cores per package and threads per core.
Our release build is now built with LTO (Link Time Optimization) which results in a ~20% reduction in the binary size.
A new abstraction has been introduced, in the form of a hypervisor crate so as to enable the support of additional hypervisors beyond KVM.
Multiple improvements have been made to the VM snapshot/restore support that was added in the last release. This includes persisting more vCPU state and in particular preserving the guest paravirtualized clock in order to avoid vCPU hangs inside the guest when running with multiple vCPUs.
A virtio-balloon device has been added, controlled through the resize control, which allows the reclamation of host memory by resizing a memory balloon inside the guest.
The ARM64 support introduced in the last release has been further enhanced with support for using PCI for exposing devices into the guest as well as multiple bug fixes. It also now supports using an initramfs when booting.
The guest can now use Intel SGX if the host supports it. Details can be found in the dedicated SGX documentation.
Seccomp Sandbox Improvements The most frequently used virtio devices are now isolated with their own seccomp filters. It is also now possible to pass --seccomp=log which result in the logging of requests that would have otherwise been denied to further aid development.
virtio-vsock implementation has been resynced with the implementation from Firecracker and includes multiple bug fixes.virtio-mmio based devices are now more widely tested (#275).virtio-console and the serial. (#1521)Many thanks to everyone who has contributed to our 0.9.0 release including some new faces.
See the GitHub Release for the release assets.
Posted April 29, 2021 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked through the v15.0 project.
Highlights for cloud-hypervisor version v15.0 include:
This release is the first in a new version numbering scheme to represent that +we believe Cloud Hypervisor is maturing and entering a period of stability. +With this new release we are beginning our new stability guarantees:
Currently the following items are not guaranteed across updates:
Building on our existing support for rate limiting block activity the network +device also now supports rate limiting. Full details of the controls are in the +IO throttling documentation.
virtio-net guest offload The guest is now able to change the offload settings for the virtio-net
+device. As well as providing a useful control this mitigates an issue in the
+Linux kernel where the guest will attempt to reprogram the offload settings
+even if they are not advertised as configurable (#2528).
--api-socket supports file descriptor parameter The --api-socket can now take an fd= parameter to specify an existing file
+descriptor to use. This is particularly beneficial for frameworks that need to
+programmatically control Cloud Hypervisor.
virtio-pmem (#2277).Deprecated features will be removed in a subsequent release and users should plan to use alternatives
bzImage
+binaries has been deprecated. When using direct boot users should configure
+their kernel with CONFIG_PVH=y. Will be removed in v16.0.Many thanks to everyone who has contributed to our release including some new faces.
See the GitHub Release for the release assets.
Posted June 10, 2021 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v16.0 project.
The live migration support inside Cloud Hypervisor has been improved with the addition of the tracking of dirty pages written by the VMM to complement the tracking of dirty pages made by the guest itself. Further the internal state of the VMM now is versioned which allows the safe migration of VMs from one version of the VMM to a newer one. However further testing is required so this should be done with care. See the live migration documentation for more details.
vhost-user support When using vhost-user to access devices implemented in different processes there is now support for reconnection of those devices in the case of a restart of the backend. In addition it is now possible to operate with the direction of the vhost-user-net connection reversed with the server in the VMM and the client in the backend. This aligns with the default approach recommended by Open vSwitch.
Cloud Hypervisor now supports using ACPI and booting from a UEFI image on ARM64. This allows the use of stock OS images without direct kernel boot.
virtio-net queues than advertised is now supported. This appeared when using OVMF with an MQ enabled device (#2578).virtio devices Cloud Hypervisor now enforces a minimum vCPU count which ensures that the user will not see adverse guest performance (#2563).The following formerly deprecated features have been removed:
bzImage
+binaries has been deprecated. When using direct boot users should configure
+their kernel with CONFIG_PVH=y.Many thanks to everyone who has contributed to our release including some new faces.
See the GitHub Release for the release assets.
Posted July 22, 2021 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v17.0 +project.
The support for ACPI on ARM64 has been enhanced to include support for +specifying a NUMA configuration using the existing control options.
Seccomp support for MSHV backend The seccomp rules have now been extended to support running against the MSHV
+hypervisor backend.
macvtap devices Hotplug of macvtap devices is now supported with the file descriptor for the
+network device if opened by the user and passed to the VMM. The ch-remote
+tool supports this functionality when adding a network device.
The SGX support has been updated to match the latest Linux kernel support and +now supports SGX provisioning and associating EPC sections to NUMA nodes.
vhost-user devices Support for handling inflight tracking of I/O requests has been added to the
+vhost-user devices allowing recovery after device reconnection.
vhost-user devices no longer advertise the
+VIRTIO_F_RING_PACKED feature as they are not yet supported in the VMM
+(#2833).Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted September 9, 2021 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v18.0 project.
vfio-user) support Experimental support for running PCI devices in userspace via vfio-user
+has been included. This allows the use of the SPDK NVMe vfio-user controller
+with Cloud Hypervisor. This is enabled by --user-device on the command line.
vhost-user devices Devices exposed into the VM via vhost-user can now be migrated using the live
+migration support. This requires support from the backend however the commonly
+used DPDK vhost-user backend does support this.
Images using the VHDX disk image format can now be used with Cloud Hypervisor.
When running on the MSHV hypervisor it is possible to pass through devices from
+the host through to the guest (e.g with --device)
virtio-mem The reference Linux kernel we recommend for using with Cloud Hypervisor now supports virtio-mem on AArch64.
Live migration is now supported when running on the MSHV hypervisor including +efficient tracking of dirty pages.
The CPU topology (as configured through --cpu topology=) can now be
+configured on AArch64 platforms and is conveyed through either ACPI or device
+tree.
Use of the ACPI power button (e.g ch-remote --api-socket=<API socket> power-button)
+is now supported when running on AArch64.
--serial pty --console pty now works correctly (#3012)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted October 14, 2021 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v19.0 project.
virtio-console The PTY support for serial has been enhanced with improved buffering when the
+PTY is not yet connected to. Using virtio-console with PTY now results in
+the console being resized if the PTY window is also resized.
Multiple optimisations have been made to the PCI handling resulting in +significant improvements in the boot time of the guest.
When using the latest TDVF firmware the ACPI tables created by the VMM are now +exposed via the firmware to the guest.
Live migration support has been enhanced to support migration with virtio-mem
+based memory hotplug and the virtio-balloon device now supports live
+migration.
virtio-mem support with vfio-user The use of vfio-user userspaces devices can now be used in conjunction with
+virtio-mem based memory hotplug and unplug.
virtio-iommu A paravirtualised IOMMU can now be used on the AArch64 platform.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted December 2, 2021 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v20.0 +project.
Cloud Hypervisor is no longer limited to 31 PCI devices. For both x86_64 and
+aarch64 architectures, it is now possible to create up to 16 PCI segments,
+increasing the total amount of supported PCI devices to 496.
For each vCPU, the user can define a limited set of host CPUs on which it is +allowed to run. This can be useful when assigning a 1:1 mapping between host and +guest resources, or when running a VM on a specific NUMA node.
Based on VFIO region capabilities, all regions can be memory mapped, limiting +the amount of triggered VM exits, and therefore increasing the performance of +the passthrough device.
Several sections containing unsafe Rust code have been replaced with safe +alternatives, and multiple comments have been added to clarify why the remaining +unsafe sections are safe to use.
The documentation related to VFIO has been updated while some new documents have
+been introduced to cover the usage of --cpus parameter as well as how to run
+Cloud Hypervisor on Intel TDX.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted December 13, 2021 by Cloud Hypervisor Team ‐ 1 min read
This is a bug fix release. The following issues have been addressed:
virtio-net (#3450)vfio-user support (#3401)DeviceTree on restoreSee the GitHub Release for the release assets.
Posted January 4, 2022 by Cloud Hypervisor Team ‐ 1 min read
This is a bug fix release. The following issues have been addressed:
SIGWINCH handler (for console resize)
+when this fails due to older kernel (#3456)SIGWINCH handler was not initialised
+(#3496)virtio-vsock blocking issue (#3497)See the GitHub Release for the release assets.
Posted January 20, 2022 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v21.0 project.
In order to support fast live upgrade of the VMM an optimised path has been added in which the memory for the VM is not compared from source to destination. This is activated by passing --local to the ch-remote send-migration command. This means that the live upgrade can complete in the order of 50ms vs 3s. (#3566)
Due to an issue in the virtio-net code in 5.14 the recommended Linux kernel is now 5.15. (#3530)
virtio-net descriptor chain (#3548)direct=on (O_DIRECT) can now be used with a guest that makes unaligned accesses (e.g. firmware) (#3587)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted March 11, 2022 by Cloud Hypervisor Team ‐ 1 min read
This is a bug fix release. The following issues have been addressed:
openat() syscall from seccomp filter (#3609)virtio-net control queue (#3829)See the GitHub Release for the release assets.
Posted March 3, 2022 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked through the v22.0 +project.
Cloud Hypervisor can now be used as debug target with GDB. This is controlled
+by the gdb compile time feature and details of how to use it can be found in
+the gdb
+documentation.
virtio-iommu Backed Segments In order to facilitate hotplug devices that require being behind an IOMMU (e.g.
+QAT) there is a new option --platform iommu_segments=<list_of_segments> that
+will place all the specified segments behind the IOMMU.
It is now possible to change the VM configuration (e.g. add or remove devices, +resize) before the VM is booted.
virtio-balloon Free Page Reporting If --balloon free_page_reporting=on is used then the guest can report pages
+that it is not using to the VMM. The VMM will then notify the host OS that
+those pages are no longer in use and can be freed. This can result in improved
+memory density.
Through the use of TD-Shim lightweight firmware it is now possible to
+directly boot into the kernel with TDX. The TDX
+documentation
+has been updated for this usage.
A PMU is now available on AArch64 for guest performance profiling. This will be +exposed automatically if available from the host.
The documentation is now licensed under the “Creative Commons Attribution 4.0 +International” license which is aligned with the project charter under the +Linux Foundation.
virtiofsd The use of the Rust based virtiofsd +is now recommended and we are no longer testing against the C based “classic” +version.
AF_INET support (#3785)virtio-balloon size is now validated against guest RAM size (#3689)virtio-net device hotplug (#3607)vhost-user features are correct across migration (#3737)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted March 11, 2022 by Cloud Hypervisor Team ‐ 1 min read
This is a bug fix release. The following issues have been addressed:
virtio-net control queue (#3829)See the GitHub Release for the release assets.
Posted April 14, 2022 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v23.0 +project.
A vDPA device has a datapath that complies with the virtio specification but
+with a vendor specific control path. The addition of --vdpa and the REST API
+equivalent allows the use of these devices with Cloud Hypervisor.
The list of officially supported and tested OS versions has been updated to +include Ubuntu “jammy” 22.04 and EOLed versions removed.
AArch64 Memory Map Improvements The memory map when running on AArch64 has been improved for the handling of
+the UEFI region which means that the booted guest OS now has full access to its
+allocated RAM. (#3938)
AMX Support Under a compile time gate of amx it is possible to compile in support for the
+AMX instruction set extension for guest use. This also requires runtime
+enabling with --cpu features=amx.
virtio-mem (#3883)virtio-iommu backed PCI segments are now comprehensively placed behind the
+vIOMMU (#3870)virtio-fs to support direct access
+(#3848)Deprecated features will be removed in a subsequent release and users should +plan to use alternatives
mergeable option from the virtio-pmem support has been deprecated
+(#3968)dax option from the virtio-fs support has been deprecated (#3889)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted May 9, 2022 by Cloud Hypervisor Team ‐ 1 min read
This is a bug fix release. The following issues have been addressed:
virtio-fs filesystem entries from config on removalvirtio-mem resize if the guest doesn’t activate the deviceSee the GitHub Release for the release assets.
Posted May 27, 2022 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v24.0 +project.
virtio-iommu virtio-iommu specification describes how a device can be attached by default
+to a bypass domain. This feature is particularly helpful for booting a VM with
+guest software which doesn’t support virtio-iommu but still need to access
+the device. Now that Cloud Hypervisor supports this feature, it can boot a VM
+with Rust Hypervisor Firmware or OVMF even if the virtio-block device exposing
+the disk image is placed behind a virtual IOMMU.
Multiple checks have been added to the code to prevent devices with identical +identifiers from being created, and therefore avoid unexpected behaviors at boot +or whenever a device was hot plugged into the VM.
Sparse mmap support has been added to both VFIO and vfio-user devices. This +allows the device regions that are not fully mappable to be partially mapped. +And the more a device region can be mapped into the guest address space, the +fewer VM exits will be generated when this device is accessed. This directly +impacts the performance related to this device.
A new serial_number option has been added to --platform, allowing a user to
+set a specific serial number for the platform. This number is exposed to the
+guest through the SMBIOS.
Deprecated features will be removed in a subsequent release and users should +plan to use alternatives
mergeable option from the virtio-pmem support has been deprecated
+(#3968)dax option from the virtio-fs support has been deprecated (#3889)A new blog post Achieving Bare Metal Performance Within a Virtual +Machine +has been added to the Cloud Hypervisor website.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted July 7, 2022 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked through the v25.0 +project.
ch-remote Improvements The ch-remote command has gained support for creating the VM from a JSON
+config and support for booting and deleting the VM from the VMM.
Under the guest_debug feature flag it is now possible to extract the memory
+of the guest for use in debugging with e.g. the crash utility. (#4012)
IFF_RUNNING on TAP devices (#4279)The following functionality has been removed:
mergeable option from the virtio-pmem support has been removed
+(#3968)dax option from the virtio-fs support has been removed (#3889)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted August 17, 2022 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked through the v26.0 +project.
--platform --platform and the appropriate API structure has gained support for supplying
+OEM strings (primarily used to communicate metadata to systemd in the guest)
+(#4319, #4446) and support for specifying the UUID (#4389.)
Support for both the MSHV and KVM hypervisors can be compiled into the same +binary with the detection of the hypervisor to use made at runtime.
SIGINT and SIGTERM signals are now handled before the VM has booted
+(#4269, #4293)virtio-fs driver in OVMF to be used (#4341, #4314)Deprecated features will be removed in a subsequent release and users should +plan to use alternatives.
kernel and initramfs members on the VmConfig have been
+moved inside a PayloadConfig as the payload member. The OpenAPI document
+has been updated to reflect the change and the old API members continue to
+function and are mapped to the new version. The expectation is that these old
+versions will be removed in the v28.0 release.The following functionality has been removed:
poll_queue parameter has been removed from --disk and
+equivalent. This was residual from the removal of the vhost-user-block
+spawning feature (#4402.)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted September 29, 2022 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked in our new roadmap +project as iteration +v27.0.
A new mailing list has been created to support broader community discussions. +Please consider subscribing; an +announcement of a regular meeting will be announced via this list shortly.
Prebuilt packages are now available. Please see this +document +on how to install. These packages also include packages for the different +firmware options available.
The MTU for the TAP device associated with a virtio-net device is now exposed
+to the guest. If the user provides an MTU with --net mtu=.. then that MTU is
+applied to created TAP interfaces. This functionality is also exposed for
+vhost-user-net devices including those created with the reference backend
+(#4658, #4676.)
Support for generating a trace report for the boot time has been added +including a script for generating an SVG from that trace (#4659.)
The set of feature flags, for e.g. experimental features, have been simplified:
msvh and kvm features provide support for those specific hypervisors
+(with kvm enabled by default),tdx provides support for Intel TDX; and although there is no MSHV support
+now it is now possible to compile with the mshv feature (#4696,)tracing adds support for boot tracing,guest_debug now covers both support for gdbing a guest (formerly gdb
+feature) and dumping guest memory.The following feature flags were removed as the functionality was enabled by
+default: amx, fwdebug, cmos and common (#4679, #4632.)
AArch64 has gained support for loading the guest kernel asynchronously like +x86-64. (#4538)
GDB stub support (accessed through --gdb under guest_debug feature) is now
+available on AArch64 as well as x86-64.
virtio-queue that addresses an issue
+where a rogue guest can potentially DoS the VMM (rust-vmm/vm-virtio#196.)virtio-console and serial devices
+(#4520, #4533, #4535.)Deprecated features will be removed in a subsequent release and users should +plan to use alternatives.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted November 17, 2022 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our new roadmap project as iteration v28.0.
Just a reminder that we have a new mailing list to support broader community discussions. Please consider subscribing. We plan to use this to announce a regular meeting for those interested in talking about Cloud +Hypervisor development.
This is the first version of Cloud Hypervisor to be released under the LTS release process. Point releases for bug fixes will be made for the next 18 months; live migration and live upgrade will be supported between the point releases of the LTS.
Support for adding an emulated CRB TPM has been added. This has its own TPM documentation.
By default, but controllable through --memory thp=off if it is possible to back the guest memory with Transparent Huge Pages (no file backing/shared=off) then this will be used resulting in improved boot performance.
The README has been refreshed with the quick start guide updated to reflect the different firmware options and to recommend the use of pre-built binaries.
virtio device bug fixes found through fuzzing (#4859, #4799)The following functionality has been removed:
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted December 14, 2022 by Cloud Hypervisor Team ‐ 1 min read
This is a bug fix release. The following issues have been addressed:
linux-loader that
+addresses an infinite loop issue (details)virtio-net including an integer overflow issue
+(#4924, #4949)cpuid information for L2 cache for older KVM on x86 (#4920)See the GitHub Release for the release assets.
Posted January 14, 2023 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap +project as iteration +v29.0. The following user visible changes have been made:
On x86-64 the binary included in releases supports both the KVM and MSHV
+hypervisor with runtime detection to identify the correct hypervisor to use.
Improvements have been made to the data structures used for both live migration +and snapshot/restore. Unfortunately this has broken compatibility with older +versions (support for migrating between major versions is not yet officially +supported but has worked for some versions.)
Improvements have been made to the volume of heap allocations when running with
+virtio-block devices along with a reduction in the peak heap size.
ch-remote Improvements Support for “pinging” the VMM and shutting the VMM down have been added to
+ch-remote.
AArch64 Documentation Integration The documentation for AArch64 support has been integrated into the main
+README.
virtio-block Counters Enhancement The counters for the virtio-block device has extended to include min/mean/max
+latency counters.
The virtio-net device has gained support for controlling the enabling of
+checksum and offloading. This allows the device to be used in environments
+where the hardware lacks support for the offloading.
linux-loader that addresses an
+infinite loop issue (details)virtio-net including an integer overflow issue
+(#4924, #4949)cpuid information for L2 cache for older KVM on x86 (#4920)virtio devices are now shutdown on reboot (#5095)No functionality has been removed in this release.
MemoryZoneConfig::file or
+MemoryConfig::file has been deprecated. This was originally used for
+supporting huge pages or shared memory backing which is now natively supported
+(#5085)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted February 24, 2023 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap project as iteration +v30.0. The following user visible changes have been made:
The clap crate was replaced by the argh crate to create our command
+line, which reduced our release binary size from 3.6MB to 3.3MB. There
+were several syntax changes:
--option=value commands now are --option value.--disk DISK1 DISK2 command now is --disk DISK1 --disk DISK2.-vvv command now is -v -v -vOur vfio-user crate is extended to provide basic server side support
+with an example of gpio vfio-user device. This crate now is moved to its
+own repository under the
+rust-vmm organization.
A new building target is added for profiling purposes with examples of
+heap profiling using dhat gated by the dhat-heap feature.
The documentation on Intel TDX is expanded with details of the building +and using TD-Shim, +references to TDX Tools, and +version information of guest/host kernel/TDVF/TDShim being tested. Also, +a new ‘heap profiling’ documentation is added with improvements on the +existing ‘profiling’ documentation.
exit_evt upon thread exit (#5211)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted April 6, 2023 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap project as iteration +v31.0. The following user visible changes have been made:
acpi_tables Adapted to the latest acpi_tables. There has been significant API changes in +the crate.
Updated the recommended guest kernel version from 6.1.6 to 6.2.
SIGWINCH Handler A separate thread had been created to capture the SIGWINCH signal and resize
+the guest console. Now the thread is skipped if the console is not resizable.
Two completely different code paths existed for handling console resizing, one
+for tty and the other for pty. That makes the understanding of the console
+handling code unnecessarily complicated. Now the code paths are unified. Both
+tty and pty are supported in single SIGWINCH handler. And the new handler
+can work with kernel versions earlier than v5.5.
MemoryZoneConfig::file Setting a directory to MemoryZoneConfig::file is no longer supported.
Before this change, users could set a directory to file of the --memory-zone
+option. In that case, a temporary file would be created as the backing file for
+the mmap(2) operation. This functionality has been unnecessary since we had
+the native support for hugepages and allocating anonymous shared memory.
vhost-user FS
+client.ShutdownVmm and Shutdown commands to call the correct API
+endpoint.Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted May 18, 2023 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked in our roadmap project as iteration v32.0. The following user visible changes have been made:
The maximum number of PCI segments that can be used is now 96 (up from 16).
KVM_ARM_VCPU_PMU_V3 if available (#5360)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted June 29, 2023 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked in our roadmap project as iteration +v33.0. The following user visible changes have been made:
A D-Bus based API has been added as an alternative to the existing REST
+API. This feature is gated by the dbus_api feature. Details can be
+found in the API documentation.
Now the CPU cache information on the host is properly exposed to the +guest on AArch64.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted August 10, 2023 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap project as iteration v34.0. The following user visible changes have been made:
A new device has been added that can communicate when the guest kernel has panicked and share those details with the VMM. This is controlled with a new --pvpanic command line option and JSON API change equivalent. (#5526)
Requesting to dump the guest memory as core dump will now transparently pause the VM if required; returning to the original state after. (#5604)
The support for QCOW2 files has been enhanced to include support for using backing files. (#5573)
The minimum supported host kernel is now 5.13 in order to incorporate a bug fix for KVM_FEATURE_ASYNC_PF_INT functionality. (#5626)
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted September 21, 2023 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap project as iteration v35.0. +The following user visible changes have been made:
virtio-vsock Support for Linux Guest Kernel v6.3+ Since kernel v6.3, a vsock packet can be included in a single descriptor,
+instead of being split over two descriptors. Our virtio-vsock implementation
+now supports both situations.
virtio-block A new option serial is added to the --block command that allows users to
+specify a serial number for block devices which will be guest visible.
This ensures migration works correctly between hosts that have different TSC +frequencies if the guest is running with TSC as the source of timekeeping.
static mut with once_cell (#5772)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted November 2, 2023 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap project as iteration +v36.0. The following user visible changes have been made:
We switched back to use the clap crate to create our command line,
+since the argh crate is barely maintained. There were several syntax
+changes:
--option value commands now are --option=value.--disk DISK1 --disk DISK2 command now is --disk DISK1 DISK2.-v -v -v command now is -vvv.Note: the released binary size increased around 0.3M due to this change.
Now the enabled (Cargo) features of the running Cloud Hypervisor
+instance can be queried via API endpoint (/vmm.ping) and CLI
+(--version -v).
The --numa command is augmented with a new option pci_segment=, so
+that users can define the relationship between PCI segments and NUMA
+nodes. Examples can be found from the memory documentation
Now the CPU topology on x86_64 platforms supports multiple vendors.
The --serial command is augmented with a new option socket=, allowing
+users to access the serial port using a Unix socket.
An AIO backend is added for virtio-block devices to improve block
+device performance when the io_uring feature is not supported by the
+host Operating System.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted December 14, 2023 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap project as iteration +v37.0. The following user visible changes have been made:
This release is a LTS release. Point releases for bug fixes will be made +for the next 18 months; live migration and live upgrade will be +supported between the point releases of the LTS.
Now VFIO devices with 32-bit memory BARs can be attached to non-zero PCI +segments on the guest, allowing users to have more 32-bit devices and +assign such devices to appropriate NUMA nodes for better performance.
Named TAP devices now accept IP configuration from users, such as IP +and MAC address, as long as the named TAP device is created by Cloud +Hypervisor (e.g. not existing TAP devices).
Now legacy serial device and virtio console can be set as TTY mode at
+the same time. This allows users to capture early boot logs with the
+legacy serial device without losing performance benefits of using
+virtio-console, when appropriate kernel configuration is used (such as
+using kernel command-line console=hvc0 earlyprintk=ttyS0 on x86).
The speed of VM restoration from snapshots is improved with a better +implementation of deserializing JSON files.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted February 16, 2024 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap +project as iteration +v38.0. The following user visible changes have been made:
Users now can throttle a group of block devices with the new
+--rate-limiter-group option. Details can be found from the I/O
+Throttling documentation
Users now have the option to pin virt-queue threads for block devices +to specific host cpus.
The boot time with prefault option enabled is optimized via parallel
+memory prefault.
A ‘debug-console’ device is added to provide a user-configurable debug +port for logging guest information. Details can be found from the Debug +IO Ports documentation.
All non-emulated MMIO regions of VFIO devices are now mapped to the VFIO +container, allowing PCIe P2P between all VFIO devices on the same +VM. This is required for a wide variety of multi-GPU workloads involving +GPUDirect P2P (DMA between two GPUs), GPUDirect RDMA (DMA between a GPU +and an IB device).
Users now can set the vcpu affinity to a host CPU with index larger +than 255.
virtio-vsock(#6080, #6091, #6095)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted April 27, 2024 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap +project as iteration +v39.0. The following user visible changes have been made:
It is now possible to use --pci-segment to adjust the aperture size that
+32-bit and 64-bit PCI device BARs will be allocated from. Previously
+the address space was equally distributed across all the segments which may
+leave insufficient space for devices that require a large 32-bit space. With
+this change the weighting per segment can be adjusted. (#6387)
Support for directly booting Linux from bzImages has been added. (#6200)
The x_nv_gpudirect_clique option was added to --device to allow the
+configuration of device P2P support with NVIDIA GPUs. (#6235)
A new API endpoint and ch-remote option added for injecting an NMI into the
+guest. (#6047)
sigwinch_listener process (#6208)queue_affinity option in OpenAPI metadata (#6268)virtio-vsock
+(#6306)virtio-fs tag validation (#6358, #6359)pvpanic device to OpenAPI metadata (#6372)virtio-mem regions with snapshot/restore (#6337,
+#6338)event-monitor events around reboot (#6277, #6274)ch-remote with no subcommand (#6230)virtio devices after snapshot/restore and live migration
+(#6326, #6265)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted June 21, 2024 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap +project as iteration +v40.0. The following user visible changes have been made:
It is now possible to pass file descriptors over the HTTP API (and using
+ch-remote) when restoring to update the file descriptors for network devices.
+This enables snapshot & restore functionality for guests using macvtap or
+other file descriptor backed network devices. (#6286)
ch-remote remove-device has been improved (#6456)--serial
+(#6486)cpuid instructions and by seeding the in kernel file descriptor table
+(#6498, #6478)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted August 16, 2024 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap +project as iteration +v41.0. The following user visible changes have been made:
VMM support has been added for this experimental functionality (requires +currently out of tree Linux kernel patches) to allow guests to control their +physical memory properties to allow optimisations and security features. +(#6318, #6467)
Support for restricting the VMM process using the Linux kernel “Landlock” API
+has been added - this can be used to restrict the files (and the read/write
+permissions) that the VMM process can access. This adds another layer of
+security alongside the existing syscall filters (seccomp) - this can be
+enabled with --landlock and fully documented. (#5170)
virtio-net via the use of a cache of Iovec
+structures (#6636)EVENT_IDX") support has been added to
+virtio-block giving a 60% improvement in single queue block throughput and
+IOPs performance (#6580)status field in virtio-block state (#6586)fcntl syscall on debug assertions so this is now
+included in the virtio-device seccomp filters for tests that use this (#6648)virtio-vsock device (#6621)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted October 25, 2024 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked in our roadmap +project as iteration +v42.0. The following user visible changes have been made:
The SVE and SVE2 feature bits are now propagated through to the guest on +AArch64. (#6678, #6691)
virtio-console resizing (#6704)virtio device to a VM that has been restored (#6775)During this release cycle a new VFIO CI worker has been provided by Crusoe +Energy and a new ARM64 CI worker has been provided by Ubicloud.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted December 18, 2024 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked in our roadmap +project as iteration +v43.0. The following user visible changes have been made:
Support has been added to enable direct live migration from two hosts via TCP +connections. This supplements the existing support for migrating over a UNIX +socket which can then be tunnelled as desired. The documentation has been +updated. (#6850)
The VIRTIO_RING_F_INDIRECT_DESC feature has been enabled for virtio-block
+devices. This significantly increases the throughput of the devices with a
+small negative impact on latency. (#6826)
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted February 3, 2025 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in our roadmap project as iteration +v44.0. The following user visible changes have been made:
virtio-iommu Address Width The iommu_address_width option has been added to --platform to allow users
+to limit the virtio-iommu address space in the guest. (#6900)
The VIRTIO_BLK_F_SEG_MAX feature has been enabled for virtio-block devices,
+which brings significant performance improvements on throughput. (#6885)
The io_uring entries are no longer forced to use async helper workers,
+delegating the decision to the kernel. This change resolved the issue of having
+excessive amount of worker threads when io_uring is being used, which is
+expected to improve performance, such as reducing memory usage and reduce CPU
+contention.
Our continuous fuzzing infrastructure is augmented with two new fuzzers to cover
+x86 instruction emulator and virtio-vsock.
Many thanks to everyone who has contributed to our release:
This release has been tracked in our roadmap +project as iteration +v43.0. The following user visible changes have been made:
Support has been added to enable direct live migration from two hosts via TCP +connections. This supplements the existing support for migrating over a UNIX +socket which can then be tunnelled as desired. The documentation has been +updated. (#6850)
The VIRTIO_RING_F_INDIRECT_DESC feature has been enabled for virtio-block
+devices. This significantly increases the throughput of the devices with a
+small negative impact on latency. (#6826)
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted March 30, 2025 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked in v45.0 group of our roadmap project.
riscv64 Architecture Support Cloud Hypervisor now has experimental riscv64 architecture
+support. Details can be found from the riscv
+documentation.
To improve the readability of CLI options, the output of the --help
+now is alphabetically sorted. (#6988)
The downtime of VM live migration is reduced via delaying some of the +tearing down process of the source VM after the destination VM is up and +running. (#6987)
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted May 23, 2025 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in v46.0 group of our roadmap project.
--disk Now file-level locking is enforced for disk images, provided by users
+with --disk. This ensures that only a single Cloud Hypervisor instance
+can obtain write access to a given disk image at any time, preventing
+misconfiguration and avoiding potential data corruption. (#6974)
Instead of returning a generic error 400 (e.g. BadRequest), users
+now get a more specific error 429 (e.g. TooManyRequests) when a
+pending VM resizing is not completed. This allows users to better handle
+different errors, say retrying the request when applicable. (#7043)
--net It is now possible to specify an IPv6 address and mask when creating a
+network interface with --net. (#7048)
It is now possible to start VMs on AArch64 platforms when using MSHV +hypervisor. (#7055)
The SGX support now is deprecated with a warning message if it is being +used, with the intention to remove its support from our code base in two +release cycles (e.g. v48.0). (#7090)
path as required for DiskConfig from the OpenAPI spec file
+(#7017)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted July 22, 2025 by Cloud Hypervisor Team ‐ 1 min read
This release has been tracked in v47.0 group of our roadmap project.
Instead of exiting on I/O errors, the virtio-block device now reports
+errors to the guest using VIRTIO_BLK_S_IOERR. It improves the user
+experience particularly when the guest rootfs is not backed by the
+affected block device. (#7107)
We now have the chain of errors being reported and printed nicely, when +Cloud Hypervisor or ch-remote exits on errors. (#7066)
To improve readability, ch-remote now prints help information in +alphabetical order. (#7130)
virtio-vsock (#7195)rtc_pl031 device to
+prevent spurious guest interrupts (#7199)192.168.249.1) and mask (255.255.255.0) are
+currently assigned to the virtio-net device if no value is specified
+by users. Such behavior is now deprecated. Users of this behavior will
+receive a warning message and should make adjustments. The behavior
+will be removed in two release cycles (v49.0). See the GitHub Release for the release assets.
Posted September 12, 2025 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in v48.0 group of our roadmap project.
fw_cfg Device Support This feature enables passing configuration data and files, such as VM +boot configurations (kernel, kernel cmdline, e820 memory map, and ACPI +tables), from the host to the guest. (#7117)
ivshmem Device Support Support for inter-VM shared memory has been added. For more information, +please refer to the ivshmem documentation. (#6703)
riscv64 In addition to direct kernel boot, firmware boot support has been added
+on riscv64 hosts. (#7249)
The maximum number of supported vCPUs on x86_64 hosts using KVM has been +raised from 254 to 8192. (#7299)
Performance for virtio-blk with small block sizes (16KB and below)
+is enhanced via submitting async IO requests in batches. (#7146)
The VM pause operation now is significantly faster particularly for VMs +with a large number of vCPUs. (#7290)
Our Windows documentation now includes instructions to run Windows 11 +guests, in addition to Windows Server guests. (#7218)
We will decline any contributions known to contain contents generated or +derived from using Large Language Models (LLMs). Details can be found +in our contributing documentation. (#7162)
The SGX support has been removed, as announced in the deprecation notice two +release cycles ago. (#7093)
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted November 9, 2025 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in v49.0 group of our roadmap project.
On AArch64 with MSHV, firmware boot (#7391) and graceful guest shutdown +(#7354) are now supported, and CI coverage has been added (#7381).
Logs now use an improved timestamp format (#7355), emit an explicit +message on graceful shutdown (#7346), and reduce noisy warnings with +newer guest kernels (#7423).
virtio-net Devices The implicit default IP (192.168.249.1) and netmask (255.255.255.0) for
+virtio-net devices have been removed, as previously announced in the
+deprecation notice two releases ago (#7365). Users now can create
+virtio-net devices with no IP and netmask assigned.
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted December 19, 2025 by Cloud Hypervisor Team ‐ 2 min read
This release has been tracked in v50.0 group of our roadmap project.
The nested=on|off option has been added to --cpu to allow users
+to configure nested virtualization support in the guest on x86_64
+hosts (for both KVM and MSHV). The default value is on to maintain
+consistency with existing behavior. (#7408)
QCOW2 support has been extended to handle compression clusters based on +zlib and zstd. (#7462)
Performance of live migration has been improved via an optimized +implementation of dirty bitmap maintenance. (#7468)
The /vm.resize-disk API has been introduced to allow users to resize block
+devices backed by raw images while a guest is running. (#7476)
Significant improvements have been made to developer experience and
+productivity. These include a simplified root manifest, codified and
+tightened Clippy lints, and streamlined workflows for cargo clippy and
+cargo test. (#7489)
Block devices now use byte-range advisory locks instead of whole-file +locks. While both approaches prevent multiple Cloud Hypervisor instances +from simultaneously accessing the same disk image with write +permissions, byte-range locks provide better compatibility with network +storage backends. (#7494)
Logs now include event information generated by the event-monitor +module. (#7512)
Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted February 20, 2026 by Cloud Hypervisor Team ‐ 1 min read
This is a point release containing security fixes and bug fixes.
This release fixes a security vulnerability in disk image handling. +Details can be found in GHSA-jmr4-g2hv-mjj6.
backing_files=on|off option has been added to --disk to
+explicitly control whether QCOW2 backing files are permitted. This
+defaults to off to prevent the loading of backing files entirely.
+(#7685)num_queues > 1) (#7661)See the GitHub Release for the release assets.
Posted February 20, 2026 by Cloud Hypervisor Team ‐ 3 min read
This release has been tracked in v51.0 group of our roadmap project.
This release fixes a security vulnerability in disk image handling. +Details can be found in GHSA-jmr4-g2hv-mjj6.
backing_files=on|off option has been added to --disk to
+explicitly control whether QCOW2 backing files are permitted. This
+defaults to off to prevent the loading of backing files entirely.
+(#7685)A large number of QCOW2 v3 specification features have been implemented:
num_queues > 1)
+(#7661)ACPI Generic Initiator Affinity (SRAT Type 5) support has been added
+to associate VFIO-PCI devices with dedicated memory/CPU-less NUMA
+nodes. This enables the guest OS to make NUMA-aware memory allocation
+decisions for device workloads. A new device_id parameter has been
+added to --numa for specifying VFIO devices. (#7626)
The virtio-blk device now supports DISCARD and WRITE_ZEROES
+operations for QCOW2 and RAW image formats. This enables thin
+provisioning and efficient space reclamation when guests trim
+filesystems. A new sparse=on|off option has been added to --disk to
+control disk space management: sparse=on (default) enables thin
+provisioning with space reclamation, while sparse=off provides thick
+provisioning with consistent I/O latency. (#7666)
shared=on) via madvise. Previously, THP
+was only used for non-shared memory. (#7646)vhost-user-net device now uses the default set of vhost-user
+virtio features, including VIRTIO_F_RING_INDIRECT_DESC, which
+provides a performance improvement. (#7653)threads_per_core > 1 (#7668)save_data_tables() to unblock VM pause/resume (#7692)GHCB_INFO_SPECIAL_DBGPRINT VMG exit in SEV-SNP guest exit
+handler (#7703)shared=false (#7674)VIRTIO_BLK_F_RO even if guest does not negotiate it
+(#7705)vhost-user-block get_config interoperability (#7617)gettid() to all seccomp filters (#7596)+ instead of hex
+characters (#7579)--net fd help text (#7702)Many thanks to everyone who has contributed to our release:
See the GitHub Release for the release assets.
Posted February 20, 2026 by Cloud Hypervisor Team ‐ 3 min read
Posted February 20, 2026 by Cloud Hypervisor Team ‐ 1 min read
Posted December 19, 2025 by Cloud Hypervisor Team ‐ 2 min read
Posted November 9, 2025 by Cloud Hypervisor Team ‐ 2 min read
Posted September 12, 2025 by Cloud Hypervisor Team ‐ 2 min read
Posted July 22, 2025 by Cloud Hypervisor Team ‐ 1 min read
Posted May 23, 2025 by Cloud Hypervisor Team ‐ 2 min read
Posted July 7, 2022 by Sebastien Boeuf ‐ 6 min read
Depending on the expectations around the workload running in a Virtual Machine +(VM), as well as the agreement between a customer and an operator, multiple use +cases related to memory management between host and guest arise and they can be +addressed through different techniques.
This section is about how memory can be managed by the host, as it knows the VM +is going to need more or less memory during its runtime.
This is particularly well illustrated by the Cloud Native use case, involving +containers being created or destroyed, leading to the need to adjust the amount +of memory available.
We can also take the example of users running a single VM with a certain amount +of RAM until they need more because a different type of workload requires a +larger amount of memory.
We don’t want to run a very large VM just in case we might need more memory +later because the larger the VM, the more expensive the cost. And that’s why +having access to such flexibility is very convenient for our users.
This technique was the former way of performing some sort of hot plug/unplug +without dealing with the complexity associated with adding or removing memory +slots. The logic is reversed in a sense, as the VM is created with the maximum +amount of memory that can be expected, and a balloon is inflated inside the +guest, in order to reduce the available amount of memory.
From a host perspective, if the system is running low on memory, inflating the +balloon from one or several VMs can help the host getting some memory back to +keep the system afloat. +On the other hand if the host has enough spare memory to give some more to one +of its VMs, it can deflate the balloon.
It is important to note that until recently (introduction of virtio-mem),
+ballooning was the only reliable way of removing memory from a running VM.
This is the proper technique for adding or removing memory to a VM, without
+using a tool like a balloon. And there are two different ways of performing
+memory hot plug, one is through ACPI and the other is with virtio-mem.
This has been the standard way for hot plugging some memory into a VM for quite +some time. This is because the mechanism is part of the ACPI specification and +has been implemented in guest OSes such as Linux and Windows years ago.
Depending on the guest OS, limitations might differ, but for instance with Linux +there is no way to plug a memory DIMM smaller than 128 MiB. This doesn’t allow +for fine grained hot plug, but that’s good enough for practical use cases, as we +usually increase a VM’s memory using gigabyte as the unit.
The main drawback is the hot unplug really. This is inconvenient and complicated
+for such operation to succeed because we must remove an entire memory DIMM. For
+instance, if we plug an extra 2 GiB to a running VM, and later on we want to
+remove only 1 GiB, this won’t be possible as we can only remove 2 GiB. Of course
+we could have plugged the extra 2 GiB splitting it into 16 DIMMs of 128 MiB, but
+we would hit the limit on how many memory slots are actually supported.
+And even if we could work around this limitation, main part of the complexity
+comes from the fact we have no way to ensure the guest isn’t using any of the
+memory slot we’re trying to remove.
+This makes hot unplug an unsupported feature with the ACPI hot plug mechanism.
This is the reason why, before virtio-mem came into the picture, we could combine ACPI hot plug with a balloon, so that we would use the ACPI mechanism for hot plugging while the balloon would be used for both hot unplug and fine grained operations.
This fairly recent solution has been introduced to address all the use cases we mentioned so far. It is a full hot plug/unplug solution as it doesn’t have any of the drawbacks of balloon and ACPI hot plug. It allows for fine grained hot plug/unplug, relying on the virtio-mem driver in the guest to manage small chunks of memory. Unless you have strong reasons not to use virtio-mem (guest kernel not recent enough, for instance), this should be the standard way of dynamically managing a VM’s memory. This feature is available starting with Linux 5.8.
Now that we’ve covered how the host can manage the guest memory allocated to a VM, let’s look at the way the guest can notify the host for accurate memory management.
In general, when a VM is created with a certain amount of memory, only a small portion of what is available is actually consumed by the VM after boot. But as soon as one or multiple workloads run inside the guest, the memory starts being paged. At this point, let’s say 90% of the VM’s memory has been allocated; the guest might start freeing some pages after the workloads have been stopped. This will have no impact on the amount of memory consumed from a host perspective, as the host has no way to know which pages the guest might have freed.
When looking for a way to overcommit memory, meaning when running multiple VMs where the sum of all guest RAMs is higher than what the physical RAM can offer, being able to free pages based on the guest’s input is a must. And that’s what the free page reporting feature has been added for in the virtio-balloon specification.
This feature requires a virtio-balloon device to be created, but it doesn’t require any balloon because inflating and deflating operations are not part of the mechanism. When enabled, the guest will have a way of notifying the VMM about sets of pages being freed. Based on this information, the VMM will advise the host that these pages are no longer in use, which effectively gives some memory back to the host.
One small drawback of this feature is that, depending on how frequently the guest reports back to the host, the VMM will have more work to do and be slightly less efficient. And it is because of this slight drop in performance that we didn’t enable this feature by default in Cloud Hypervisor.
When running critical workloads that must not fail, particularly because of memory pressure in the guest, the deflate on OOM feature can be very convenient as it gives the guest the power to deflate the balloon when an Out Of Memory (OOM) event happens. Of course this means the balloon must have been previously inflated, otherwise the guest will have nothing to deflate and no way to release the memory pressure.
If you want to use these features with Cloud Hypervisor, please refer to the following documentation:
Posted March 30, 2025 by Cloud Hypervisor Team ‐ 1 min read
Posted February 3, 2025 by Cloud Hypervisor Team ‐ 2 min read
Posted December 18, 2024 by Cloud Hypervisor Team ‐ 1 min read
Posted October 25, 2024 by Cloud Hypervisor Team ‐ 1 min read
Posted August 16, 2024 by Cloud Hypervisor Team ‐ 2 min read
Posted June 21, 2024 by Cloud Hypervisor Team ‐ 2 min read
Posted April 27, 2024 by Cloud Hypervisor Team ‐ 2 min read
Posted February 16, 2024 by Cloud Hypervisor Team ‐ 2 min read
Posted December 14, 2023 by Cloud Hypervisor Team ‐ 2 min read
Posted November 2, 2023 by Cloud Hypervisor Team ‐ 2 min read
Posted September 21, 2023 by Cloud Hypervisor Team ‐ 2 min read
Posted August 10, 2023 by Cloud Hypervisor Team ‐ 2 min read
Posted June 29, 2023 by Cloud Hypervisor Team ‐ 1 min read
Posted May 18, 2023 by Cloud Hypervisor Team ‐ 1 min read
Posted April 6, 2023 by Cloud Hypervisor Team ‐ 2 min read
Posted February 24, 2023 by Cloud Hypervisor Team ‐ 2 min read
Posted January 14, 2023 by Cloud Hypervisor Team ‐ 2 min read
Posted December 14, 2022 by Cloud Hypervisor Team ‐ 1 min read
Posted November 17, 2022 by Cloud Hypervisor Team ‐ 2 min read
Posted September 29, 2022 by Cloud Hypervisor Team ‐ 3 min read
Posted August 17, 2022 by Cloud Hypervisor Team ‐ 2 min read
Posted July 7, 2022 by Cloud Hypervisor Team ‐ 1 min read
Posted July 7, 2022 by Sebastien Boeuf ‐ 6 min read
Posted May 27, 2022 by Cloud Hypervisor Team ‐ 2 min read
Posted May 19, 2022 by Sebastien Boeuf ‐ 7 min read
Posted May 9, 2022 by Cloud Hypervisor Team ‐ 1 min read
Posted April 14, 2022 by Cloud Hypervisor Team ‐ 2 min read
Posted March 11, 2022 by Cloud Hypervisor Team ‐ 1 min read
Posted March 11, 2022 by Cloud Hypervisor Team ‐ 1 min read
Posted March 3, 2022 by Cloud Hypervisor Team ‐ 3 min read
Posted January 20, 2022 by Cloud Hypervisor Team ‐ 2 min read
Posted January 4, 2022 by Cloud Hypervisor Team ‐ 1 min read
Posted December 13, 2021 by Cloud Hypervisor Team ‐ 1 min read
Posted December 2, 2021 by Cloud Hypervisor Team ‐ 2 min read
Posted October 14, 2021 by Cloud Hypervisor Team ‐ 2 min read
Posted September 9, 2021 by Cloud Hypervisor Team ‐ 2 min read
Posted July 22, 2021 by Cloud Hypervisor Team ‐ 2 min read
Posted June 10, 2021 by Cloud Hypervisor Team ‐ 2 min read
Posted April 29, 2021 by Cloud Hypervisor Team ‐ 3 min read
Posted March 31, 2021 by Cloud Hypervisor Team ‐ 1 min read
Posted March 26, 2021 by Cloud Hypervisor Team ‐ 2 min read
Posted February 12, 2021 by Cloud Hypervisor Team ‐ 2 min read
Posted December 10, 2020 by Cloud Hypervisor Team ‐ 1 min read
Posted October 29, 2020 by Cloud Hypervisor Team ‐ 3 min read
Posted September 18, 2020 by Cloud Hypervisor Team ‐ 2 min read
Posted August 6, 2020 by Cloud Hypervisor Team ‐ 3 min read
Posted June 11, 2020 by Cloud Hypervisor Team ‐ 3 min read
Posted April 30, 2020 by Cloud Hypervisor Team ‐ 3 min read
Posted March 20, 2020 by Cloud Hypervisor Team ‐ 2 min read
Posted February 7, 2020 by Cloud Hypervisor Team ‐ 2 min read
Posted December 13, 2019 by Cloud Hypervisor Team ‐ 3 min read
Posted October 18, 2019 by Cloud Hypervisor Team ‐ 2 min read
Posted September 10, 2019 by Cloud Hypervisor Team ‐ 2 min read