@olivierlambert Thank you very much for the info.
Better this way, I will create a pool with the 4 equal hosts.
@olivierlambert Thank you very much for the info.
Better this way, I will create a pool with the 4 equal hosts.
Hello.
I'm migrating my latest ESXi server, and since the XO update in the last commit, I can't import VMs from the XO tool or by importing OVAs.
Xen Orchestra, commit 0a0ae
Master, commit ddc9b
log error:
{
"id": "0mkpqc8rl",
"properties": {
"name": "importing vms 323",
"userId": "8f8faf6a-10d1-4d19-b4b4-1a3cc3160011",
"total": 1,
"done": 0,
"progress": 0
},
"start": 1769103152480,
"status": "failure",
"updatedAt": 1769103158620,
"tasks": [
{
"id": "55qd0sxt1qs",
"properties": {
"name": "importing vm 323",
"done": 1,
"progress": 100
},
"start": 1769103152480,
"status": "failure",
"tasks": [
{
"id": "nfilsz8xbk",
"properties": {
"name": "connecting to 192.168.90.51"
},
"start": 1769103152480,
"status": "success",
"end": 1769103152722,
"result": {
"_events": {},
"_eventsCount": 0
}
},
{
"id": "dw7wwunqox5",
"properties": {
"name": "get metadata of 323"
},
"start": 1769103152722,
"status": "success",
"end": 1769103153037,
"result": {
"name_label": "PostfixTestDavid",
"memory": 3166699520,
"nCpus": 4,
"guestToolsInstalled": false,
"guestId": "debian6_64Guest",
"guestFullName": [
"Debian GNU/Linux 6 (64-bit)"
],
"firmware": "bios",
"powerState": "poweredOff",
"disks": [
{
"capacity": 21474836480,
"isFull": true,
"uid": "fd68d003",
"fileName": "PostfixTestDavid-flat.vmdk",
"parentId": "ffffffff",
"vmdkFormat": "VMFS",
"nameLabel": "PostfixTestDavid-flat.vmdk",
"usingVsan": false,
"datastore": "StorageVM_04",
"path": "PostfixTestDavid",
"diskPath": "PostfixTestDavid/PostfixTestDavid.vmdk",
"descriptionLabel": "",
"node": "scsi0:0"
}
],
"networks": [
{
"label": "Guest",
"macAddress": "00:50:56:a6:46:87",
"isGenerated": false
}
],
"vmId": "323"
}
},
{
"id": "y66ov437opm",
"properties": {
"name": "creating VM on XCP side"
},
"start": 1769103153037,
"status": "success",
"end": 1769103153334,
"result": {
"uuid": "c9242431-95b4-d7c5-5a2f-9cc914f5d6ed",
"allowed_operations": [
"create_vtpm",
"destroy",
"metadata_export",
"export",
"make_into_template",
"changing_NVRAM",
"changing_shadow_memory",
"changing_static_range",
"changing_dynamic_range",
"migrate_send",
"start_on",
"start",
"copy",
"clone",
"snapshot"
],
"current_operations": {},
"name_label": "PostfixTestDavid",
"name_description": "from esxi -- source guest id :debian6_64Guest -- template used:CentOS Stream 9",
"power_state": "Halted",
"user_version": 1,
"is_a_template": false,
"is_default_template": false,
"suspend_VDI": "OpaqueRef:NULL",
"resident_on": "OpaqueRef:NULL",
"scheduled_to_be_resident_on": "OpaqueRef:NULL",
"affinity": "OpaqueRef:NULL",
"memory_overhead": 31457280,
"memory_target": 0,
"memory_static_max": 3166699520,
"memory_dynamic_max": 3166699520,
"memory_dynamic_min": 3166699520,
"memory_static_min": 2147483648,
"VCPUs_params": {},
"VCPUs_max": 4,
"VCPUs_at_startup": 4,
"actions_after_softreboot": "soft_reboot",
"actions_after_shutdown": "destroy",
"actions_after_reboot": "restart",
"actions_after_crash": "restart",
"consoles": [],
"VIFs": [],
"VBDs": [],
"VUSBs": [],
"crash_dumps": [],
"VTPMs": [],
"PV_bootloader": "",
"PV_kernel": "",
"PV_ramdisk": "",
"PV_args": "",
"PV_bootloader_args": "",
"PV_legacy_args": "",
"HVM_boot_policy": "BIOS order",
"HVM_boot_params": {
"order": "cdn"
},
"HVM_shadow_multiplier": 1,
"platform": {
"cores-per-socket": "1",
"vga": "std",
"videoram": "8",
"viridian": "false",
"device_id": "0001",
"device-model": "qemu-upstream-compat",
"secureboot": "false",
"nx": "true",
"acpi": "1",
"apic": "true",
"pae": "true",
"hpet": "true"
},
"PCI_bus": "",
"other_config": {
"mac_seed": "2c8bcffe-aee3-c6b0-9525-58f6a19e63e8",
"vgpu_pci": "",
"base_template_name": "Other install media",
"install-methods": "cdrom"
},
"domid": -1,
"domarch": "",
"last_boot_CPU_flags": {},
"is_control_domain": false,
"metrics": "OpaqueRef:8f1660a9-e72c-46a7-1ff5-40c786746b29",
"guest_metrics": "OpaqueRef:NULL",
"last_booted_record": "",
"recommendations": "<restrictions><restriction field=\"memory-static-max\" max=\"137438953472\" /><restriction field=\"vcpus-max\" max=\"32\" /><restriction property=\"number-of-vbds\" max=\"255\" /><restriction property=\"number-of-vifs\" max=\"7\" /><restriction field=\"has-vendor-device\" value=\"false\" /></restrictions>",
"xenstore_data": {},
"ha_always_run": false,
"ha_restart_priority": "",
"is_a_snapshot": false,
"snapshot_of": "OpaqueRef:NULL",
"snapshots": [],
"snapshot_time": "19700101T00:00:00Z",
"transportable_snapshot_id": "",
"blobs": {},
"tags": [],
"blocked_operations": {},
"snapshot_info": {},
"snapshot_metadata": "",
"parent": "OpaqueRef:NULL",
"children": [],
"bios_strings": {},
"protection_policy": "OpaqueRef:NULL",
"is_snapshot_from_vmpp": false,
"snapshot_schedule": "OpaqueRef:NULL",
"is_vmss_snapshot": false,
"appliance": "OpaqueRef:NULL",
"start_delay": 0,
"shutdown_delay": 0,
"order": 0,
"VGPUs": [],
"attached_PCIs": [],
"suspend_SR": "OpaqueRef:NULL",
"version": 0,
"generation_id": "0:0",
"hardware_platform_version": 0,
"has_vendor_device": false,
"requires_reboot": false,
"reference_label": "",
"domain_type": "hvm",
"NVRAM": {},
"pending_guidances": [],
"pending_guidances_recommended": [],
"pending_guidances_full": [],
"groups": []
}
},
{
"id": "539oscxzftt",
"properties": {
"name": "build disks and snapshots chains for 323"
},
"start": 1769103153481,
"status": "success",
"end": 1769103153481,
"result": {
"scsi0:0": [
{
"capacity": 21474836480,
"isFull": true,
"uid": "fd68d003",
"fileName": "PostfixTestDavid-flat.vmdk",
"parentId": "ffffffff",
"vmdkFormat": "VMFS",
"nameLabel": "PostfixTestDavid-flat.vmdk",
"usingVsan": false,
"datastore": "StorageVM_04",
"path": "PostfixTestDavid",
"diskPath": "PostfixTestDavid/PostfixTestDavid.vmdk",
"descriptionLabel": "",
"node": "scsi0:0"
}
]
}
},
{
"id": "35yj1qpe7d2",
"properties": {
"name": "Import of disks scsi0:0"
},
"start": 1769103153481,
"status": "failure",
"infos": [
{
"message": "Importing disk in vhd format, with block of 2097152 bytes"
},
{
"message": "no reference disk found, fall back a full import"
}
],
"warnings": [
{
"message": {
"succeeded": {}
}
}
],
"end": 1769103158587,
"result": {
"message": "stream has ended without data, was looking for 134 bytes",
"name": "Error",
"stack": "Error: stream has ended without data, was looking for 134 bytes\n at readChunkStrict (/opt/xo/xo-builds/xen-orchestra-202601151504/@vates/read-chunk/index.js:83:13)"
}
}
],
"infos": [
{
"message": "No previously transfered VM found, do a full transfer"
}
],
"end": 1769103158618,
"result": {
"message": "stream has ended without data, was looking for 134 bytes",
"name": "Error",
"stack": "Error: stream has ended without data, was looking for 134 bytes\n at readChunkStrict (/opt/xo/xo-builds/xen-orchestra-202601151504/@vates/read-chunk/index.js:83:13)"
}
}
],
"end": 1769103158620,
"result": {
"succeeded": {},
"message": "stream has ended without data, was looking for 134 bytes",
"name": "Error",
"stack": "Error: stream has ended without data, was looking for 134 bytes\n at readChunkStrict (/opt/xo/xo-builds/xen-orchestra-202601151504/@vates/read-chunk/index.js:83:13)"
}
}
I have XO running with a proxy password using Nginx.
I don't think that has anything to do with it, right?
Thanks!!!!
@olivierlambert said in Issues migration esxi to do the virtual machine Windows server and SQL in xo:
This message comes from your ESXi host, not XO. I have limited expertise on VMware, but I would check the logs there to understand if there's a problem.
Could it be that the datastore holding the hard drives in VMware does not have enough free space to take snapshots of them?
Since there are 220 GB between the three hard drives.
@olivierlambert
Hi.
Sorry.
I created a virtual machine in XO/XCP-ng and it created it perfectly.
I migrated a similar virtual machine but with only one hard drive and it also migrated it fine.
@olivierlambert thanks olivier
Yes, I can create the job from Import → VMware.
I can access the vmware host as root user and full permissions.
The job starts performing the migration, but stops after 40–45 minutes.
In XO it creates the VM and the hard drive, but they are corrupted.
I tried to boot the machine created in XO in case I was lucky.
Thanks in advance.
Hello.
I'm having problems migrating a virtual machine from ESXi to XO.
This machine has 3 hard drives of 150GB + 20GB + 100GB.
When checking the error logs, I get an error:
"message": "503 Service Unavailable https://xxx.xxx.xxx.xxx/folder/testSql2017/testSql2017_3-000001-delta.vmdk?dcPath=ha-datacenter&dsName=StorageVM_03",
This error appears for each hard drive. Am I right in understanding that the delta is the snapshot that XO takes before migrating?
Then I have these more errors on the import:
"task": {
"uuid": "4ee878dd-53f4-85b0-e5a3-7e19aa9a17e8",
"name_label": "[XO] Importing content into VDI [ESXI]testSql2017_1-flat.vmdk on SR Nfs_VM02",
"name_description": "",
"allowed_operations": [],
"current_operations": {},
"created": "20240921T21:34:42Z",
"finished": "20240921T21:55:00Z",
"status": "failure",
"resident_on": "OpaqueRef:621f8618-cbed-40ee-9b57-81c5ed682dac",
"progress": 1,
"type": "<none/>",
"result": "",
"error_info": [
"VDI_IO_ERROR",
"Device I/O errors"
I guess they are related.
Any idea how to fix it??
Thanks.
@olivierlambert Thank you very much for the info.
Better this way, I will create a pool with the 4 equal hosts.
@olivierlambert Ok, understood.
The issue is that the current server which is master is the most different from the other 4 we have.
We have this fifth one for testing and migrations.
The others are all the same.
Can VMs be migrated in different pools?
Could one host be changed from master to another once we have the same ones in the same pool??
Thanks in advance.
@olivierlambert Now it has been added,
but it has also copied the VLAN configurations along with the network cards...
Is that expected?
If on host 1 I had VLAN 40 for storage on eth1,
do I have to configure it the same way on host 2?
Can't I have it on eth4 instead?
@olivierlambert Thank you for your prompt response.
If I understood correctly, the slave host only needs to have the management network configured.
But I have more VLANs that access the shared SR. Do I have to configure these afterwards?