Subcategories

  • VMs, hosts, pools, networks and all other usual management tasks.

    408 Topics
    3k Posts
    dthenot
    @cmanos No problem, glad I could help. As Olivier also pointed out above, it's no longer an issue when using QCOW2, which is currently in beta, so hopefully it's only a short-term workaround
  • ACLs, Self-service, Cloud-init, Load balancing...

    96 Topics
    811 Posts
    olivierlambert
    Indeed, and it's clearly stated at https://docs.xen-orchestra.com/community
  • All XO backup features: full and incremental, replication, mirrors...

    417 Topics
    4k Posts
    A
    Also having this issue. I didn't see it until my remote SR ran out of free space. The full job log JSON (mode "delta", jobName "Backup Job 1", overall status "success") covers seven VMs: XO, Veeam, Bookstack, SeedBox, iVentoy, UnifiOS and Docker of Things. Every transfer completes over NBD and the health checks pass (or are skipped by tag), but the deltas fall back to full backups ("Backup fell back to a full") and every clean-vm step warns "cleanVm: incorrect backup size in metadata", the actual size coming in slightly below the expected one (e.g. actual 25839009792 vs. expected 25845451264 for the XO VM), after which the snapshot data is deleted.
  • Everything related to Xen Orchestra's REST API

    71 Topics
    555 Posts
    olivierlambert
    Yes, they are created either on demand for users, or when we need them in XO 6. Now, XO 6 is becoming the main driver for new endpoints, as the UI is providing more and more features.
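    As a quick, hedged illustration (the host name and token below are placeholders; tokens are created from your XO user settings), listing VMs through the REST API looks like:

        # List VMs via the XO REST API; host and token are examples.
        curl -b 'authenticationToken=XXX' \
          'https://xoa.example.lan/rest/v0/vms?fields=name_label,power_state'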
  • Terraform, Packer or any tool to do IaC

    40 Topics
    379 Posts
    Cyrille
    @carloum70 Disk migration isn't supported by the provider yet. What you can do is ignore the changes to the sr_id of a given disk, for example for the first disk (full sketch below): lifecycle { ignore_changes = [ disk[0].sr_id ] } You can also do the migration manually in XO and then edit your HCL to update the sr_id with the new ID. That should do the trick.
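    A hedged sketch of that workaround in a full resource block; the resource and attribute names follow the terraform-provider-xenorchestra conventions, but the surrounding values are made up:

        resource "xenorchestra_vm" "example" {
          # ... template, CPU, memory, network, etc. ...

          disk {
            sr_id      = "initial-sr-uuid" # SR the disk was created on
            name_label = "root"
            size       = 32212254720
          }

          lifecycle {
            # After migrating the disk to another SR in XO, stop Terraform
            # from reporting drift on the first disk's sr_id.
            ignore_changes = [
              disk[0].sr_id,
            ]
          }
        }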
  • Does XOA pause scheduled backups during XOA upgrades?

    0 Votes
    7 Posts
    763 Views
    julien-f
    XO does not pause backups during upgrades/restarts; all currently running backups will be interrupted, as @olivierlambert said. An interrupted backup is not a big problem by itself: nothing will be broken, and the next run will proceed properly. Backups are only run at the time they are scheduled; if XO is offline at that time, it will not automatically run them when restarted, it will wait for the next scheduled run.
  • Roadmap XO6

    0 Votes
    11 Posts
    4k Views
    olivierlambert
    Our doc is up to date here: https://docs.xcp-ng.org/project/ecosystem/#-vm-backup
  • XO Sources on Host

    Moved
    0 Votes
    5 Posts
    930 Views
    CTG
    @olivierlambert Thanks, Olivier!
  • XCP-ng host status enabled but can't access it.

    Solved
    0 Votes
    15 Posts
    2k Views
    olivierlambert
    Ah indeed, it's written in bold in the documentation; I forgot to ask you about this ^^ Enjoy XO!
  • XOA Proxy and Console Access

    Solved
    0 Votes
    15 Posts
    2k Views
    olivierlambert
    Thanks everyone for the report!
  • Console Zoom Percentage Slightly Cutoff

    0 Votes
    1 Post
    168 Views
    No one has replied
  • Preventing "new network" detection on different XCP-NG hosts

    0 Votes
    14 Posts
    3k Views
    nikade
    @fohdeesha said in Preventing "new network" detection on different XCP-NG hosts:

    @Zevgeny As you suspected, this is caused by the VMs booting with new & fresh MAC addresses they've never seen before, which from their point of view means an entirely new NIC. The "clean" solution here would be an option or toggle inside XOA backup jobs that allows MAC addresses to be preserved - this way the replicated DR VMs on the backup site still have the same MAC addresses. This could be something you could file as a feature request on our GitHub. Note, however, that pretty much any VM copy/replicate action will trigger Xen to generate a new MAC address (for safety, as duplicated MACs are usually bad). This means that even if the MAC is preserved on the DR VMs, when your admin copies the DR VMs and boots them, the new copies will have newly generated MACs. I suppose it would be possible to add a "preserve MAC" checkbox for copy operations too, but I'm not sure if XAPI currently exposes such functionality.

    Yeah, keeping the MAC address would really solve this. We actually edit the MAC address manually on the copied VMs in case we need to start the DR copy up for testing.
  • VMware migration tool not bringing disks

    Solved
    0 Votes
    7 Posts
    937 Views
    ItMeCorban
    Yep, the snapshots solved it. And I was even able to import the VMs while they were still running, so that was a bonus. Thanks for your help @Danp
  • Import from VMWare fails Error: 404 Not Found

    0 Votes
    4 Posts
    744 Views
    S
    @florent will do
  • Updated XOA with kernel >5.3 to support nconnect nfs option

    1 Vote
    34 Posts
    7k Views
    M
    @manilx Two more, and yes, it seems to be confusing. Just to round it up: same VM as above, delta (full) backup using NBD, without and with "nconnect=6" in the remote settings (see the sketch below): [screenshot: transfer without nconnect] [screenshot: transfer with nconnect=6] nconnect=6 doesn't seem to do a lot.
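    For reference, nconnect is a standard Linux NFS mount option (kernel >= 5.3, hence the topic title) that opens several TCP connections to the same server. A hedged sketch of mounting a remote that way; the server name and paths are examples:

        # Mount an NFS remote with 6 TCP connections to the server
        # (requires kernel >= 5.3; names and paths are placeholders).
        mount -t nfs -o vers=4.1,nconnect=6 \
          nas.example.lan:/export/xo-backups /mnt/xo-remote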
  • Technique for shared repo of cloudinit templates

    cloudinit
    0 Votes
    4 Posts
    425 Views
    olivierlambert
    Your own pre-recorded Cloud config setup is user-wide (so per user). It won't be universal for everyone.
  • Default console username and password on XOA Appliance

    Solved
    0 Votes
    10 Posts
    24k Views
    K
    @olivierlambert Hey thanks a lot for that tip.
  • Cannot delete failed XO VMware-related migration tasks

    0 Votes
    14 Posts
    2k Views
    D
    @gb-123 Yes, I have tried that and it appears to have deleted the tasks. First I had to register as follows: xo-cli --register <URL of XOA> <admin username> <password>. Then: xo-cli rest get tasks, which showed a list of task IDs. I verified the IDs against the ID shown in the raw log for each of the tasks listed in XOA. Once I figured out which task(s) I wanted to delete, I ran the following for each task: xo-cli rest del tasks/<task ID> (the full sequence is sketched below). I have not seen any ill effects since I deleted some tasks last August. Here is a link about removing tasks using xo-cli: https://help.vates.tech/kb/en-us/31-troubleshooting-and-tips/123-how-to-remove-xo-tasks
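    A condensed sketch of that sequence; the URL, credentials, and task ID are placeholders:

        # Register the CLI against the XOA instance
        xo-cli --register https://xoa.example.lan admin@example.org 'password'

        # List tasks, then cross-check the IDs against the raw logs in XOA
        xo-cli rest get tasks

        # Delete each unwanted task by ID
        xo-cli rest del tasks/<task ID>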
  • Warm migration stuck at 0%

    Solved
    1 Vote
    12 Posts
    1k Views
    J
    Currently on commit 2f962 and it's working. Thank you all!
  • Easy way to find a failed task?

    0 Votes
    2 Posts
    542 Views
    D
    @Pyroteq said in Easy way to find a failed task?: It seems as soon as you refresh the page everything disappears. The Tasks page only shows logs while you're viewing it. Afterwards, I think they get recorded to the Logs, but if your system fell asleep, XO wouldn't know what happened, other than the connection getting dropped, and I don't know if that would get recorded.
  • Debian 12 template - long load on BIOS screen at startup

    Solved
    0 Votes
    4 Posts
    467 Views
    olivierlambert
    Yes indeed: if you are not passing a static IP in the template, cloud-init will wait a few minutes to get an IP before continuing to boot (an example config is sketched below)
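    A minimal sketch of passing a static IP via a cloud-init network config (version 1 format); all addresses below are examples, not values from this thread:

        # cloud-init network-config; addresses are placeholders
        network:
          version: 1
          config:
            - type: physical
              name: eth0
              subnets:
                - type: static
                  address: 192.168.1.50/24
                  gateway: 192.168.1.1
                  dns_nameservers:
                    - 192.168.1.1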
  • OVA import support?

    ova
    0 Votes
    5 Posts
    4k Views
    florent
    @wttw said in OVA import support?:

    @olivierlambert said in OVA import support?: Yes, it's supposed to work. Sadly, there are as many different OVAs as there are VMs and VMware versions. We fix new cases every time one is reported. Please provide the OVA somewhere so we can find why and fix it.

    https://tupid.org/debian11.ova - it's about 850MB. I just created this on ESXi 7.0U3, then exported it with ./ovftool --noSSLVerify --diskMode=thin --targetType=ova --compress=9 "vi://skull/debian11" . (VMware ovftool 4.6.2, build-22220919.) Import from VMware should work if you indeed have the XCP-ng machine in extra for it. The hardware is on order, but I thought I'd start testing this weekend. But we could fix the OVA already, at least as soon as we can check what's wrong with the XML. Here's debian11.ovf, for anyone who wants to look without downloading the OVA: [full OVF envelope omitted: one gzip-compressed, streamOptimized VMDK (16 GiB capacity), an NVRAM file, SATA/SCSI/USB/IDE controllers, a VMXNET3 NIC on "VM Network", BIOS firmware, hardware version vmx-19.]

    This is an OVA with a compressed VMDK disk. They are not supported for now (since VMDKs are already compressed per block). TBF, we're putting much of the import/export effort into the v2v tool.
  • xo-cli examples

    0 Votes
    10 Posts
    2k Views
    I
    @julien-f OK, is there a specific rule on the format of this identifier? When I create a backup, I set an email address for reporting, but that address (or notification setting) is not present in the JSON; why, and how can I set it? For rolling snapshots, the content is OK (same identifier in both settings and schedule): { "name": "rolling snapshots", "mode": "full", "schedules": { "gbkk1mbemhg": { "cron": "15 12 * * *", "enabled": true, "name": "daily rolling snapshots", "timezone": "Europe/Rome" } }, "settings": { "": { "concurrency": 2, "nRetriesVmBackupFailures": 3, "timeout": 7200000, "offlineSnapshot": false, "checkpointSnapshot": true }, "gbkk1mbemhg": { "healthCheckSr": "ac595bf5-ca14-586e-6f89-9e4e884043eb", "healthCheckVmsWithTags": [], "snapshotRetention": 2 } }, "vms": { "id": { "__or": [ "64adb2e9-e0f3-9e70-b08f-7c8653415053", "aaf9e807-3ee9-37ff-05ec-cf782ba74d56", "7cd276ae-b7df-06d8-3da8-48262750051d", "9ccfcd0c-a35e-7f45-a0d2-19db678291f1", "42afaaea-ada4-fb7f-dc55-f62a51a6997a", "54b97ba5-9858-a563-e02f-89b0a6c450a1", "9524834a-3577-4445-54c6-1c786775749d" ] } } } And it's OK for the VM backup using: { "name": "vm backups", "mode": "full", "compression": "zstd", "schedules": { "fdi73l44ewf": { "cron": "40 12 * * 6", "enabled": true, "name": "weekly vm backups", "timezone": "Europe/Rome" } }, "settings": { "": { "concurrency": 2, "nRetriesVmBackupFailures": 3, "timeout": 7200000, "offlineSnapshot": false, "checkpointSnapshot": true }, "fdi73l44ewf": { "exportRetention": 1, "healthCheckSr": "ac595bf5-ca14-586e-6f89-9e4e884043eb", "healthCheckVmsWithTags": [] } }, "remotes": { "id": "40e5ecde-f12f-43c7-befd-c1f0a76e8a25" }, "vms": { "id": { "__or": [ "64adb2e9-e0f3-9e70-b08f-7c8653415053", "aaf9e807-3ee9-37ff-05ec-cf782ba74d56", "7cd276ae-b7df-06d8-3da8-48262750051d", "9ccfcd0c-a35e-7f45-a0d2-19db678291f1", "42afaaea-ada4-fb7f-dc55-f62a51a6997a", "54b97ba5-9858-a563-e02f-89b0a6c450a1", "9524834a-3577-4445-54c6-1c786775749d" ] } } }
  • XOA pricing guide

    0 Votes
    7 Posts
    2k Views
    olivierlambert
    We'll keep XOA standalone for XenServer; the pricing will likely change for those (per host, and only with Premium). In your case, you'll have 2 options: going for XOA standalone for XenServer, per host (we don't know yet the price per host we'll have), or migrating to XCP-ng to get an Essential bundle.
  • How do i migrate ALL vms from one host to another host?

    0 Votes
    7 Posts
    5k Views
    Z
    This helped me: https://www.youtube.com/watch?v=H5PJ_tHQlZk