@nathanael-h Nice
If you have any questions let me know, I have been using this for all our on prem clusters for a while now.
I do not have any asks ATM, but I thought I would share the Terraform plan I use to create the k8s clusters we have been running for a while now.
It has grown over time and may be a bit messy, but I figured it is better than nothing. We use this for RKE2 Rancher k8s clusters deployed onto our XCP-ng cluster. We use XOSTOR for drives, and the VLAN5 network is for the Piraeus Operator to use for PVs. We also use IPVS. We are using a Rocky Linux 9 VM template.
If these are useful to anyone and they have questions, I will do my best to answer.
variable "pool" {
  default = "OVBH-PROD-XENPOOL04"
}

variable "network0" {
  default = "Native vRack"
}

variable "network1" {
  default = "VLAN80"
}

variable "network2" {
  default = "VLAN5"
}

variable "cluster_name" {
  default = "Production K8s Cluster"
}

variable "enrollment_command" {
  default = "curl -fL https://rancher.<redacted>.net/system-agent-install.sh | sudo sh -s - --server https://rancher.<redacted>.net --label 'cattle.io/os=linux' --token <redacted>"
}

variable "node_type" {
  description = "Node type flag"
  default = {
    "1" = "--etcd --controlplane",
    "2" = "--etcd --controlplane",
    "3" = "--etcd --controlplane",
    "4" = "--worker",
    "5" = "--worker",
    "6" = "--worker",
    "7" = "--worker --taints smtp=true:NoSchedule",
    "8" = "--worker --taints smtp=true:NoSchedule",
    "9" = "--worker --taints smtp=true:NoSchedule"
  }
}

variable "node_networks" {
  description = "Node network flag"
  default = {
    "1" = "--internal-address 10.1.8.100 --address <redacted>",
    "2" = "--internal-address 10.1.8.101 --address <redacted>",
    "3" = "--internal-address 10.1.8.102 --address <redacted>",
    "4" = "--internal-address 10.1.8.103 --address <redacted>",
    "5" = "--internal-address 10.1.8.104 --address <redacted>",
    "6" = "--internal-address 10.1.8.105 --address <redacted>",
    "7" = "--internal-address 10.1.8.106 --address <redacted>",
    "8" = "--internal-address 10.1.8.107 --address <redacted>",
    "9" = "--internal-address 10.1.8.108 --address <redacted>"
  }
}
variable "vm_name" {
  description = "VM name per node"
  default = {
    "1" = "OVBH-VPROD-K8S01-MASTER01",
    "2" = "OVBH-VPROD-K8S01-MASTER02",
    "3" = "OVBH-VPROD-K8S01-MASTER03",
    "4" = "OVBH-VPROD-K8S01-WORKER01",
    "5" = "OVBH-VPROD-K8S01-WORKER02",
    "6" = "OVBH-VPROD-K8S01-WORKER03",
    "7" = "OVBH-VPROD-K8S01-WORKER04",
    "8" = "OVBH-VPROD-K8S01-WORKER05",
    "9" = "OVBH-VPROD-K8S01-WORKER06"
  }
}

variable "preferred_host" {
  default = {
    "1" = "85838113-e4b8-4520-9f6d-8f3cf554c8f1",
    "2" = "783c27ac-2dcb-4798-9ca8-27f5f30791f6",
    "3" = "c03e1a45-4c4c-46f5-a2a1-d8de2e22a866",
    "4" = "85838113-e4b8-4520-9f6d-8f3cf554c8f1",
    "5" = "783c27ac-2dcb-4798-9ca8-27f5f30791f6",
    "6" = "c03e1a45-4c4c-46f5-a2a1-d8de2e22a866",
    "7" = "85838113-e4b8-4520-9f6d-8f3cf554c8f1",
    "8" = "783c27ac-2dcb-4798-9ca8-27f5f30791f6",
    "9" = "c03e1a45-4c4c-46f5-a2a1-d8de2e22a866"
  }
}

variable "xoa_admin_password" {
}

variable "host_count" {
  description = "SR to use for each node's disk (all drives go to XOSTOR)"
  default = {
    "1" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "2" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "3" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "4" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "5" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "6" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "7" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "8" = "479ca676-20a1-4051-7189-a4a9ca47e00d",
    "9" = "479ca676-20a1-4051-7189-a4a9ca47e00d"
  }
}
variable "network1_ip_mapping" {
  description = "Mapping for network1 ips, vlan80"
  default = {
    "1" = "10.1.8.100",
    "2" = "10.1.8.101",
    "3" = "10.1.8.102",
    "4" = "10.1.8.103",
    "5" = "10.1.8.104",
    "6" = "10.1.8.105",
    "7" = "10.1.8.106",
    "8" = "10.1.8.107",
    "9" = "10.1.8.108"
  }
}

variable "network1_gateway" {
  description = "Gateway for network1, vlan80"
  default = "10.1.8.1"
}

variable "network1_prefix" {
  description = "Prefix for the network used"
  default = "22"
}

variable "network2_ip_mapping" {
  description = "Mapping for network2 ips, VLAN5"
  default = {
    "1" = "10.2.5.30",
    "2" = "10.2.5.31",
    "3" = "10.2.5.32",
    "4" = "10.2.5.33",
    "5" = "10.2.5.34",
    "6" = "10.2.5.35",
    "7" = "10.2.5.36",
    "8" = "10.2.5.37",
    "9" = "10.2.5.38"
  }
}

variable "network2_prefix" {
  description = "Prefix for the network used"
  default = "22"
}

variable "network0_ip_mapping" {
  description = "Mapping for network0 ips, public"
  default = {
    <redacted>
  }
}

variable "network0_gateway" {
  description = "Mapping for public ip gateways, from hosts"
  default = {
    <redacted>
  }
}

variable "network0_prefix" {
  description = "Prefix for the network used"
  default = {
    <redacted>
  }
}
# Instruct terraform to download the provider on `terraform init`
terraform {
  required_providers {
    xenorchestra = {
      source  = "vatesfr/xenorchestra"
      version = "~> 0.29.0"
    }
  }
}

# Configure the XenServer Provider
provider "xenorchestra" {
  # Must be ws or wss
  url      = "ws://10.2.0.5"          # Or set XOA_URL environment variable
  username = "admin@admin.net"        # Or set XOA_USER environment variable
  password = var.xoa_admin_password   # Or set XOA_PASSWORD environment variable
}

data "xenorchestra_pool" "pool" {
  name_label = var.pool
}

data "xenorchestra_template" "template" {
  name_label = "Rocky Linux 9 Template"
  pool_id    = data.xenorchestra_pool.pool.id
}

data "xenorchestra_network" "net1" {
  name_label = var.network1
  pool_id    = data.xenorchestra_pool.pool.id
}

data "xenorchestra_network" "net2" {
  name_label = var.network2
  pool_id    = data.xenorchestra_pool.pool.id
}

data "xenorchestra_network" "net0" {
  name_label = var.network0
  pool_id    = data.xenorchestra_pool.pool.id
}
resource "xenorchestra_cloud_config" "node" {
  count = 9
  name  = "${lower(lookup(var.vm_name, count.index + 1))}_cloud_config"

  template = <<EOF
#cloud-config
ssh_authorized_keys:
  - ssh-rsa <redacted>
write_files:
  - path: /etc/NetworkManager/conf.d/rke2-canal.conf
    permissions: '0755'
    owner: root
    content: |
      [keyfile]
      unmanaged-devices=interface-name:cali*;interface-name:flannel*
  - path: /tmp/selinux_kmod_drbd.log
    permissions: '0640'
    owner: root
    content: |
      type=AVC msg=audit(1661803314.183:778): avc: denied { module_load } for pid=148256 comm="insmod" path="/tmp/ko/drbd.ko" dev="overlay" ino=101839829 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:object_r:var_lib_t:s0 tclass=system permissive=0
      type=AVC msg=audit(1661803314.185:779): avc: denied { module_load } for pid=148257 comm="insmod" path="/tmp/ko/drbd_transport_tcp.ko" dev="overlay" ino=101839831 scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:object_r:var_lib_t:s0 tclass=system permissive=0
  - path: /etc/sysconfig/modules/ipvs.modules
    permissions: 0755
    owner: root
    content: |
      #!/bin/bash
      modprobe -- ip_vs
      modprobe -- ip_vs_rr
      modprobe -- ip_vs_wrr
      modprobe -- ip_vs_sh
      modprobe -- nf_conntrack
  - path: /etc/modules-load.d/ipvs.conf
    permissions: 0755
    owner: root
    content: |
      ip_vs
      ip_vs_rr
      ip_vs_wrr
      ip_vs_sh
      nf_conntrack
#cloud-init
runcmd:
  - sudo hostnamectl set-hostname --static ${lower(lookup(var.vm_name, count.index + 1))}.<redacted>.com
  - sudo hostnamectl set-hostname ${lower(lookup(var.vm_name, count.index + 1))}.<redacted>.com
  - nmcli -t -f NAME con show | xargs -d '\n' -I {} nmcli con delete "{}"
  - nmcli con add type ethernet con-name public ifname enX0
  - nmcli con mod public ipv4.address '${lookup(var.network0_ip_mapping, count.index + 1)}/${lookup(var.network0_prefix, count.index + 1)}'
  - nmcli con mod public ipv4.method manual
  - nmcli con mod public ipv4.ignore-auto-dns yes
  - nmcli con mod public ipv4.gateway '${lookup(var.network0_gateway, count.index + 1)}'
  - nmcli con mod public ipv4.dns "8.8.8.8 8.8.4.4"
  - nmcli con mod public connection.autoconnect true
  - nmcli con up public
  - nmcli con add type ethernet con-name vlan80 ifname enX1
  - nmcli con mod vlan80 ipv4.address '${lookup(var.network1_ip_mapping, count.index + 1)}/${var.network1_prefix}'
  - nmcli con mod vlan80 ipv4.method manual
  - nmcli con mod vlan80 ipv4.ignore-auto-dns yes
  - nmcli con mod vlan80 ipv4.ignore-auto-routes yes
  - nmcli con mod vlan80 ipv4.gateway '${var.network1_gateway}'
  - nmcli con mod vlan80 ipv4.dns "${var.network1_gateway}"
  - nmcli con mod vlan80 connection.autoconnect true
  - nmcli con mod vlan80 ipv4.never-default true
  - nmcli con mod vlan80 ipv6.never-default true
  - nmcli con mod vlan80 ipv4.routes "10.0.0.0/8 ${var.network1_gateway}"
  - nmcli con up vlan80
  - nmcli con add type ethernet con-name vlan5 ifname enX2
  - nmcli con mod vlan5 ipv4.address '${lookup(var.network2_ip_mapping, count.index + 1)}/${var.network2_prefix}'
  - nmcli con mod vlan5 ipv4.method manual
  - nmcli con mod vlan5 ipv4.ignore-auto-dns yes
  - nmcli con mod vlan5 ipv4.ignore-auto-routes yes
  - nmcli con mod vlan5 connection.autoconnect true
  - nmcli con mod vlan5 ipv4.never-default true
  - nmcli con mod vlan5 ipv6.never-default true
  - nmcli con up vlan5
  - systemctl restart NetworkManager
  - dnf upgrade -y
  - dnf install ipset ipvsadm -y
  - bash /etc/sysconfig/modules/ipvs.modules
  - dnf install chrony -y
  - sudo systemctl enable --now chronyd
  - yum install kernel-devel kernel-headers -y
  - yum install elfutils-libelf-devel -y
  - swapoff -a
  - modprobe -- ip_tables
  - systemctl disable --now firewalld.service
  - systemctl disable --now rngd
  - dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
  - dnf install containerd.io tar -y
  - dnf install policycoreutils-python-utils -y
  - cat /tmp/selinux_kmod_drbd.log | sudo audit2allow -M insmoddrbd
  - sudo semodule -i insmoddrbd.pp
  - ${var.enrollment_command} ${lookup(var.node_type, count.index + 1)} ${lookup(var.node_networks, count.index + 1)}
bootcmd:
  - swapoff -a
  - modprobe -- ip_tables
EOF
}
resource "xenorchestra_vm" "master" {
  count            = 3
  cpus             = 4
  memory_max       = 8589934592
  cloud_config     = xenorchestra_cloud_config.node[count.index].template
  name_label       = lookup(var.vm_name, count.index + 1)
  name_description = "${var.cluster_name} master"
  template         = data.xenorchestra_template.template.id
  auto_poweron     = true
  affinity_host    = lookup(var.preferred_host, count.index + 1)

  network {
    network_id = data.xenorchestra_network.net0.id
  }
  network {
    network_id = data.xenorchestra_network.net1.id
  }
  network {
    network_id = data.xenorchestra_network.net2.id
  }

  disk {
    sr_id      = lookup(var.host_count, count.index + 1)
    name_label = "Terraform_disk_imavo"
    size       = 107374182400
  }
}

resource "xenorchestra_vm" "worker" {
  count            = 3
  cpus             = 32
  memory_max       = 68719476736
  cloud_config     = xenorchestra_cloud_config.node[count.index + 3].template
  name_label       = lookup(var.vm_name, count.index + 3 + 1)
  name_description = "${var.cluster_name} worker"
  template         = data.xenorchestra_template.template.id
  auto_poweron     = true
  affinity_host    = lookup(var.preferred_host, count.index + 3 + 1)

  network {
    network_id = data.xenorchestra_network.net0.id
  }
  network {
    network_id = data.xenorchestra_network.net1.id
  }
  network {
    network_id = data.xenorchestra_network.net2.id
  }

  disk {
    sr_id      = lookup(var.host_count, count.index + 3 + 1)
    name_label = "Terraform_disk_imavo"
    size       = 322122547200
  }
}

resource "xenorchestra_vm" "smtp" {
  count            = 3
  cpus             = 4
  memory_max       = 8589934592
  cloud_config     = xenorchestra_cloud_config.node[count.index + 6].template
  name_label       = lookup(var.vm_name, count.index + 6 + 1)
  name_description = "${var.cluster_name} smtp worker"
  template         = data.xenorchestra_template.template.id
  auto_poweron     = true
  affinity_host    = lookup(var.preferred_host, count.index + 6 + 1)

  network {
    network_id = data.xenorchestra_network.net0.id
  }
  network {
    network_id = data.xenorchestra_network.net1.id
  }
  network {
    network_id = data.xenorchestra_network.net2.id
  }

  disk {
    sr_id      = lookup(var.host_count, count.index + 6 + 1)
    name_label = "Terraform_disk_imavo"
    size       = 53687091200
  }
}
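For anyone wanting to reuse this: the only variable without a default is xoa_admin_password, so a run looks something like the sketch below (untested as written; TF_VAR_* is standard Terraform behavior for supplying variables).

# Sketch: apply the plan above from the directory containing it.
export TF_VAR_xoa_admin_password='<redacted>'   # or pass -var 'xoa_admin_password=...'
terraform init    # downloads the vatesfr/xenorchestra provider pinned above
terraform plan -out k8s.tfplan
terraform apply k8s.tfplan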
OK, we have debugged and improved this process, so I am including it here in case it helps anyone else.
How to migrate resources between XOSTOR (linstor) clusters. This also works with piraeus-operator, which we use for k8s.
# PV: pvc-6408a214-6def-44c4-8d9a-bebb67be5510
# snapshot: pgdata-snapshot
# size: 10741612544B
#get size
lvs --noheadings --units B -o lv_size linstor_group/pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000
#prep
lvcreate -V 10741612544B --thinpool linstor_group/thin_device -n pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000 linstor_group
#create snapshot
linstor --controller original-xostor-server s create pvc-6408a214-6def-44c4-8d9a-bebb67be5510 pgdata-snapshot
#send
thin_send linstor_group/pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000_pgdata-snapshot 2>/dev/null | ssh root@new-xostor-server-01 thin_recv linstor_group/pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000 2>/dev/null
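A sketch of the same steps with the names factored into shell variables, in case it helps; the node names and pool layout are the ones from above, and it is illustrative rather than a tested script.

#!/usr/bin/env bash
set -euo pipefail
PV=pvc-6408a214-6def-44c4-8d9a-bebb67be5510
SNAP=pgdata-snapshot
SRC_CTRL=original-xostor-server
DST=root@new-xostor-server-01
# get the exact source volume size in bytes
SIZE=$(lvs --noheadings --units B -o lv_size "linstor_group/${PV}_00000" | tr -d ' ')
# prep: pre-create a thin volume of the same size on the destination
ssh "$DST" lvcreate -V "$SIZE" --thinpool linstor_group/thin_device -n "${PV}_00000" linstor_group
# create the snapshot via the source controller
linstor --controller "$SRC_CTRL" s create "$PV" "$SNAP"
# send; stderr must be discarded on both ends (see the note below)
thin_send "linstor_group/${PV}_00000_${SNAP}" 2>/dev/null | ssh "$DST" "thin_recv linstor_group/${PV}_00000 2>/dev/null"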
Prep migration
[13:29 original-xostor-server ~]# lvs --noheadings --units B -o lv_size linstor_group/pvc-12aca72c-d94a-4c09-8102-0a6646906f8d_00000
26851934208B
[13:53 new-xostor-server-01 ~]# lvcreate -V 26851934208B --thinpool linstor_group/thin_device -n pvc-12aca72c-d94a-4c09-8102-0a6646906f8d_00000 linstor_group
Logical volume "pvc-12aca72c-d94a-4c09-8102-0a6646906f8d_00000" created.
Create snapshot
[15:35:03] jonathon@jonathon-framework:~$ linstor --controller original-xostor-server s create pvc-12aca72c-d94a-4c09-8102-0a6646906f8d s_test
SUCCESS:
Description:
New snapshot 's_test' of resource 'pvc-12aca72c-d94a-4c09-8102-0a6646906f8d' registered.
Details:
Snapshot 's_test' of resource 'pvc-12aca72c-d94a-4c09-8102-0a6646906f8d' UUID is: 3a07d2fd-6dc3-4994-b13f-8c3a2bb206b8
SUCCESS:
Suspended IO of '[pvc-12aca72c-d94a-4c09-8102-0a6646906f8d]' on 'ovbh-vprod-k8s04-worker02' for snapshot
SUCCESS:
Suspended IO of '[pvc-12aca72c-d94a-4c09-8102-0a6646906f8d]' on 'original-xostor-server' for snapshot
SUCCESS:
Took snapshot of '[pvc-12aca72c-d94a-4c09-8102-0a6646906f8d]' on 'ovbh-vprod-k8s04-worker02'
SUCCESS:
Took snapshot of '[pvc-12aca72c-d94a-4c09-8102-0a6646906f8d]' on 'original-xostor-server'
SUCCESS:
Resumed IO of '[pvc-12aca72c-d94a-4c09-8102-0a6646906f8d]' on 'ovbh-vprod-k8s04-worker02' after snapshot
SUCCESS:
Resumed IO of '[pvc-12aca72c-d94a-4c09-8102-0a6646906f8d]' on 'original-xostor-server' after snapshot
Migration
[13:53 original-xostor-server ~]# thin_send /dev/linstor_group/pvc-12aca72c-d94a-4c09-8102-0a6646906f8d_00000_s_test 2>/dev/null | ssh root@new-xostor-server-01 thin_recv linstor_group/pvc-12aca72c-d94a-4c09-8102-0a6646906f8d_00000 2>/dev/null
You need to discard stderr (the 2>/dev/null) on both ends of the command or it will fail.
This is the same setup process for replica-1 or replica-3. For replica-3 you can target new-xostor-server-01 each time; for replica-1, be sure to spread them out correctly. After the thin_send to new-xostor-server-01, you will need to run commands to force a sync of the data to the replicas.
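For the replica-1 case, a hypothetical example of what spreading them out looks like, using the resource create form that appears later in this post; the volume name and node are placeholders.

# replica-1: create each LINSTOR resource on the node that received the data,
# choosing a different destination node per volume so they end up spread out
linstor --controller new-xostor-server-01 resource create new-xostor-server-02 pvc-<replica-1-volume>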
# PV: pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
# snapshot: snipeit-snapshot
# size: 21483225088B
#get size
lvs --noheadings --units B -o lv_size linstor_group/pvc-96cbebbe-f827-4a47-ae95-38b078e0d584_00000
#prep
lvcreate -V 21483225088B --thinpool linstor_group/thin_device -n pvc-96cbebbe-f827-4a47-ae95-38b078e0d584_00000 linstor_group
#create snapshot
linstor --controller original-xostor-server s create pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 snipeit-snapshot
linstor --controller original-xostor-server s l | grep -e 'snipeit-snapshot'
#send
thin_send linstor_group/pvc-96cbebbe-f827-4a47-ae95-38b078e0d584_00000_snipeit-snapshot 2>/dev/null | ssh root@new-xostor-server-01 thin_recv linstor_group/pvc-96cbebbe-f827-4a47-ae95-38b078e0d584_00000 2>/dev/null
#linstor setup
linstor --controller new-xostor-server-01 resource-definition create pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 --resource-group sc-74e1434b-b435-587e-9dea-fa067deec898
linstor --controller new-xostor-server-01 volume-definition create pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 21483225088B --storage-pool xcp-sr-linstor_group_thin_device
linstor --controller new-xostor-server-01 resource create --storage-pool xcp-sr-linstor_group_thin_device --providers LVM_THIN new-xostor-server-01 pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
linstor --controller new-xostor-server-01 resource create --auto-place +1 pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
#Run the following on the node with the data. This is the preferred command
drbdadm invalidate-remote pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
#Run the following on the node without the data. This is just for reference
drbdadm invalidate pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
linstor --controller new-xostor-server-01 r l | grep -e 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584'
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
  annotations:
    pv.kubernetes.io/provisioned-by: linstor.csi.linbit.com
  finalizers:
    - external-provisioner.volume.kubernetes.io/finalizer
    - kubernetes.io/pv-protection
    - external-attacher/linstor-csi-linbit-com
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 20Gi # Ensure this matches the actual size of the LINSTOR volume
  persistentVolumeReclaimPolicy: Retain
  storageClassName: linstor-replica-three # Adjust to the storage class you want to use
  volumeMode: Filesystem
  csi:
    driver: linstor.csi.linbit.com
    fsType: ext4
    volumeHandle: pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
    volumeAttributes:
      linstor.csi.linbit.com/mount-options: ''
      linstor.csi.linbit.com/post-mount-xfs-opts: ''
      linstor.csi.linbit.com/uses-volume-context: 'true'
      linstor.csi.linbit.com/remote-access-policy: 'true'
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    pv.kubernetes.io/bind-completed: 'yes'
    pv.kubernetes.io/bound-by-controller: 'yes'
    volume.beta.kubernetes.io/storage-provisioner: linstor.csi.linbit.com
    volume.kubernetes.io/storage-provisioner: linstor.csi.linbit.com
  finalizers:
    - kubernetes.io/pvc-protection
  name: pp-snipeit-pvc
  namespace: snipe-it
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
  storageClassName: linstor-replica-three
  volumeMode: Filesystem
  volumeName: pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
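Not spelled out above: once the PV and PVC manifests are saved (pv.yaml and pvc.yaml are assumed file names), adopting the migrated volume in the new cluster is a kubectl apply, roughly:

kubectl apply -f pv.yaml
kubectl apply -f pvc.yaml
kubectl -n snipe-it get pvc pp-snipeit-pvc   # should report Bound against the pre-created PV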
jonathon@jonathon-framework:~$ linstor --controller new-xostor-server-01 resource-definition create pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 --resource-group sc-74e1434b-b435-587e-9dea-fa067deec898
SUCCESS:
Description:
New resource definition 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' created.
Details:
Resource definition 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' UUID is: 772692e2-3fca-4069-92e9-2bef22c68a6f
jonathon@jonathon-framework:~$ linstor --controller new-xostor-server-01 volume-definition create pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 21483225088B --storage-pool xcp-sr-linstor_group_thin_device
SUCCESS:
Successfully set property key(s): StorPoolName
SUCCESS:
New volume definition with number '0' of resource definition 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' created.
jonathon@jonathon-framework:~$ linstor --controller new-xostor-server-01 resource create --storage-pool xcp-sr-linstor_group_thin_device --providers LVM_THIN new-xostor-server-01 pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
SUCCESS:
Successfully set property key(s): StorPoolName
INFO:
Updated pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 DRBD auto verify algorithm to 'crct10dif-pclmul'
SUCCESS:
Description:
New resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on node 'new-xostor-server-01' registered.
Details:
Resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on node 'new-xostor-server-01' UUID is: 3072aaae-4a34-453e-bdc6-facb47809b3d
SUCCESS:
Description:
Volume with number '0' on resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on node 'new-xostor-server-01' successfully registered
Details:
Volume UUID is: 52b11ef6-ec50-42fb-8710-1d3f8c15c657
SUCCESS:
Created resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on 'new-xostor-server-01'
jonathon@jonathon-framework:~$ linstor --controller new-xostor-server-01 resource create --auto-place +1 pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
SUCCESS:
Successfully set property key(s): StorPoolName
SUCCESS:
Successfully set property key(s): StorPoolName
SUCCESS:
Description:
Resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' successfully autoplaced on 2 nodes
Details:
Used nodes (storage pool name): 'new-xostor-server-02 (xcp-sr-linstor_group_thin_device)', 'new-xostor-server-03 (xcp-sr-linstor_group_thin_device)'
INFO:
Resource-definition property 'DrbdOptions/Resource/quorum' updated from 'off' to 'majority' by auto-quorum
INFO:
Resource-definition property 'DrbdOptions/Resource/on-no-quorum' updated from 'off' to 'suspend-io' by auto-quorum
SUCCESS:
Added peer(s) 'new-xostor-server-02', 'new-xostor-server-03' to resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on 'new-xostor-server-01'
SUCCESS:
Created resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on 'new-xostor-server-02'
SUCCESS:
Created resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on 'new-xostor-server-03'
SUCCESS:
Description:
Resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on 'new-xostor-server-03' ready
Details:
Auto-placing resource: pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
SUCCESS:
Description:
Resource 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584' on 'new-xostor-server-02' ready
Details:
Auto-placing resource: pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
At this point:
jonathon@jonathon-framework:~$ linstor --controller new-xostor-server-01 v l | grep -e 'pvc-96cbebbe-f827-4a47-ae95-38b078e0d584'
| new-xostor-server-01 | pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 | xcp-sr-linstor_group_thin_device | 0 | 1032 | /dev/drbd1032 | 9.20 GiB | Unused | UpToDate |
| new-xostor-server-02 | pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 | xcp-sr-linstor_group_thin_device | 0 | 1032 | /dev/drbd1032 | 112.73 MiB | Unused | UpToDate |
| new-xostor-server-03 | pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 | xcp-sr-linstor_group_thin_device | 0 | 1032 | /dev/drbd1032 | 112.73 MiB | Unused | UpToDate |
To force the sync, run the following command on the node with the data
drbdadm invalidate-remote pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
This will kick it to get the data re-synced.
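If you want to watch the resync progress, something like the following works (watch is an assumption; repeatedly running drbdadm status, as shown below, does the same):

watch -n 5 drbdadm status pvc-96cbebbe-f827-4a47-ae95-38b078e0d584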
[14:51 new-xostor-server-01 ~]# drbdadm invalidate-remote pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
[14:51 new-xostor-server-01 ~]# drbdadm status pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 role:Secondary
  disk:UpToDate
  new-xostor-server-02 role:Secondary
    replication:SyncSource peer-disk:Inconsistent done:1.14
  new-xostor-server-03 role:Secondary
    replication:SyncSource peer-disk:Inconsistent done:1.18

[14:51 new-xostor-server-01 ~]# drbdadm status pvc-96cbebbe-f827-4a47-ae95-38b078e0d584
pvc-96cbebbe-f827-4a47-ae95-38b078e0d584 role:Secondary
  disk:UpToDate
  new-xostor-server-02 role:Secondary
    peer-disk:UpToDate
  new-xostor-server-03 role:Secondary
    peer-disk:UpToDate
See: https://github.com/LINBIT/linstor-server/issues/389
# PV: pvc-6408a214-6def-44c4-8d9a-bebb67be5510
# snapshot: pgdata-snapshot
# size: 10741612544B
#get size
lvs --noheadings --units B -o lv_size linstor_group/pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000
#prep
lvcreate -V 10741612544B --thinpool linstor_group/thin_device -n pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000 linstor_group
#create snapshot
linstor --controller original-xostor-server s create pvc-6408a214-6def-44c4-8d9a-bebb67be5510 pgdata-snapshot
#send
thin_send linstor_group/pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000_pgdata-snapshot 2>/dev/null | ssh root@new-xostor-server-01 thin_recv linstor_group/pvc-6408a214-6def-44c4-8d9a-bebb67be5510_00000 2>/dev/null
# 1
linstor --controller new-xostor-server-01 resource-definition create pvc-6408a214-6def-44c4-8d9a-bebb67be5510 --resource-group sc-b066e430-6206-5588-a490-cc91ecef53d6
linstor --controller new-xostor-server-01 volume-definition create pvc-6408a214-6def-44c4-8d9a-bebb67be5510 10741612544B --storage-pool xcp-sr-linstor_group_thin_device
linstor --controller new-xostor-server-01 resource create new-xostor-server-01 pvc-6408a214-6def-44c4-8d9a-bebb67be5510
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-6408a214-6def-44c4-8d9a-bebb67be5510
  annotations:
    pv.kubernetes.io/provisioned-by: linstor.csi.linbit.com
  finalizers:
    - external-provisioner.volume.kubernetes.io/finalizer
    - kubernetes.io/pv-protection
    - external-attacher/linstor-csi-linbit-com
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 10Gi # Ensure this matches the actual size of the LINSTOR volume
  persistentVolumeReclaimPolicy: Retain
  storageClassName: linstor-replica-one-local # Adjust to the storage class you want to use
  volumeMode: Filesystem
  csi:
    driver: linstor.csi.linbit.com
    fsType: ext4
    volumeHandle: pvc-6408a214-6def-44c4-8d9a-bebb67be5510
    volumeAttributes:
      linstor.csi.linbit.com/mount-options: ''
      linstor.csi.linbit.com/post-mount-xfs-opts: ''
      linstor.csi.linbit.com/uses-volume-context: 'true'
      linstor.csi.linbit.com/remote-access-policy: |
        - fromSame:
            - xcp-ng/node
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: xcp-ng/node
              operator: In
              values:
                - new-xostor-server-01
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    pv.kubernetes.io/bind-completed: 'yes'
    pv.kubernetes.io/bound-by-controller: 'yes'
    volume.beta.kubernetes.io/storage-provisioner: linstor.csi.linbit.com
    volume.kubernetes.io/selected-node: ovbh-vtest-k8s01-worker01
    volume.kubernetes.io/storage-provisioner: linstor.csi.linbit.com
  finalizers:
    - kubernetes.io/pvc-protection
  name: acid-merch-2
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: linstor-replica-one-local
  volumeMode: Filesystem
  volumeName: pvc-6408a214-6def-44c4-8d9a-bebb67be5510
The reason it may be socat is that the commands fail when I try using it as instructed by https://github.com/LINBIT/thin-send-recv:
[13:03 ovbh-pprod-xen11 ~]# thin_send linstor_group/pvc-12aca72c-d94a-4c09-8102-0a6646906f8d_00000_s_test 2>/dev/null | zstd | socat STDIN TCP:10.2.0.10:4321
2024/10/28 13:04:59 socat[25701] E write(5, 0x55da36101da0, 8192): Broken pipe
...
[13:03 ovbh-pprod-xen01 ~]# socat TCP-LISTEN:4321 STDOUT | zstd -d | thin_recv linstor_group/pvc-12aca72c-d94a-4c09-8102-0a6646906f8d_00000 2>/dev/null
2024/10/28 13:04:59 socat[27039] E read(1, 0x560ef6ff4350, 8192): Bad file descriptor
And the same thing happens if I exclude zstd from both commands.
So, I did more testing. It looks like thin_send_recv is not the problem, but maybe socat is.
I am able to manually migrate resources between XOSTOR (linstor) clusters using thin_send_recv. I have included all the steps below so that it can be replicated.
And we know socat is used, because it complains if it is not there.
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 backup ship newCluster pvc-086a5817-d813-41fe-86d8-3fac2ae2028f pvc-086a5817-d813-41fe-86d8-3fac2ae2028f
INFO:
Cannot use node 'ovbh-pprod-xen10' as it does not support the tool(s): SOCAT
INFO:
Cannot use node 'ovbh-pprod-xen12' as it does not support the tool(s): SOCAT
INFO:
Cannot use node 'ovbh-pprod-xen13' as it does not support the tool(s): SOCAT
ERROR:
Backup shipping of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' cannot be started since there is no node available that supports backup shipping.
Using thin_send_recv 1.0.1.
[16:16 ovbh-pprod-xen11 ~]# thin_send --version
1.0.1
[16:16 ovbh-pprod-xen01 ~]# thin_recv --version
1.0.1
Versions of socat match.
[16:16 ovbh-pprod-xen11 ~]# socat -V
socat by Gerhard Rieger and contributors - see www.dest-unreach.org
socat version 1.7.3.2 on Aug 4 2017 04:57:10
running on Linux version #1 SMP Tue Jan 23 14:12:55 CET 2024, release 4.19.0+1, machine x86_64
features:
#define WITH_STDIO 1
#define WITH_FDNUM 1
#define WITH_FILE 1
#define WITH_CREAT 1
#define WITH_GOPEN 1
#define WITH_TERMIOS 1
#define WITH_PIPE 1
#define WITH_UNIX 1
#define WITH_ABSTRACT_UNIXSOCKET 1
#define WITH_IP4 1
#define WITH_IP6 1
#define WITH_RAWIP 1
#define WITH_GENERICSOCKET 1
#define WITH_INTERFACE 1
#define WITH_TCP 1
#define WITH_UDP 1
#define WITH_SCTP 1
#define WITH_LISTEN 1
#define WITH_SOCKS4 1
#define WITH_SOCKS4A 1
#define WITH_PROXY 1
#define WITH_SYSTEM 1
#define WITH_EXEC 1
#define WITH_READLINE 1
#define WITH_TUN 1
#define WITH_PTY 1
#define WITH_OPENSSL 1
#undef WITH_FIPS
#define WITH_LIBWRAP 1
#define WITH_SYCLS 1
#define WITH_FILAN 1
#define WITH_RETRY 1
#define WITH_MSGLEVEL 0 /*debug*/
...
[16:17 ovbh-pprod-xen01 ~]# socat -V
socat by Gerhard Rieger and contributors - see www.dest-unreach.org
socat version 1.7.3.2 on Aug 4 2017 04:57:10
running on Linux version #1 SMP Tue Jan 23 14:12:55 CET 2024, release 4.19.0+1, machine x86_64
features:
#define WITH_STDIO 1
#define WITH_FDNUM 1
#define WITH_FILE 1
#define WITH_CREAT 1
#define WITH_GOPEN 1
#define WITH_TERMIOS 1
#define WITH_PIPE 1
#define WITH_UNIX 1
#define WITH_ABSTRACT_UNIXSOCKET 1
#define WITH_IP4 1
#define WITH_IP6 1
#define WITH_RAWIP 1
#define WITH_GENERICSOCKET 1
#define WITH_INTERFACE 1
#define WITH_TCP 1
#define WITH_UDP 1
#define WITH_SCTP 1
#define WITH_LISTEN 1
#define WITH_SOCKS4 1
#define WITH_SOCKS4A 1
#define WITH_PROXY 1
#define WITH_SYSTEM 1
#define WITH_EXEC 1
#define WITH_READLINE 1
#define WITH_TUN 1
#define WITH_PTY 1
#define WITH_OPENSSL 1
#undef WITH_FIPS
#define WITH_LIBWRAP 1
#define WITH_SYCLS 1
#define WITH_FILAN 1
#define WITH_RETRY 1
#define WITH_MSGLEVEL 0 /*debug*/
Migrating using only thin_send_recv works.
I am curious if anyone else can replicate this, as it is just attempting to move a resource between two XOSTOR clusters. If it is just me, I can continue troubleshooting; otherwise, it would be nice to know it is an exercise in futility.
I am well aware that the release a few days ago has everyone swamped, so this can wait; it would just be awesome to know, as it would change migration plans.
https://linbit.com/drbd-user-guide/linstor-guide-1_0-en/#s-linstor-snapshots-shipping
thin-send-recv is needed to ship data when using LVM thin-provisioned volumes
Yeah this seems to be for any type of shipping, s3 or otherwise.
Turns out I did not have SOCAT on the new linstor cluster, and that was why I was getting that error message.
I am able to run the command:
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 backup create linbit-velero-preprod-backup pvc-086a5817-d813-41fe-86d8-3fac2ae2028f
SUCCESS:
Suspended IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-vtest-k8s02-worker01' for snapshot
SUCCESS:
Suspended IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-vtest-k8s02-worker03' for snapshot
SUCCESS:
Suspended IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen13' for snapshot
SUCCESS:
Suspended IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen12' for snapshot
SUCCESS:
Suspended IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen10' for snapshot
SUCCESS:
Took snapshot of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-vtest-k8s02-worker01'
SUCCESS:
Took snapshot of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-vtest-k8s02-worker03'
SUCCESS:
Took snapshot of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen13'
SUCCESS:
Took snapshot of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen12'
SUCCESS:
Took snapshot of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen10'
SUCCESS:
Resumed IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-vtest-k8s02-worker01' after snapshot
SUCCESS:
Resumed IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-vtest-k8s02-worker03' after snapshot
SUCCESS:
Resumed IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen13' after snapshot
SUCCESS:
Resumed IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen12' after snapshot
SUCCESS:
Resumed IO of '[pvc-086a5817-d813-41fe-86d8-3fac2ae2028f]' on 'ovbh-pprod-xen10' after snapshot
INFO:
Generated snapshot name for backup of resource pvc-086a5817-d813-41fe-86d8-3fac2ae2028f to remote linbit-velero-preprod-backup
INFO:
Shipping of resource pvc-086a5817-d813-41fe-86d8-3fac2ae2028f to remote linbit-velero-preprod-backup in progress.
SUCCESS:
Started shipping of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f'
SUCCESS:
Started shipping of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f'
SUCCESS:
Started shipping of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f'
But over an hour later it has still not finished.
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 s l
| ResourceName                             | SnapshotName                  | NodeNames        | Volumes  | CreatedOn           | State     |
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f | back_20241009_161658_5ttp634a | ovbh-pprod-xen01 | 0: 8 GiB | 2024-10-09 13:17:02 | Restoring |
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 rd l
| ResourceName                             | Port | ResourceGroup | State |
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f |      | DfltRscGrp    | ok    |
Seems like it might be the same issue as S3.
2024_10_09 16:17:00.885 [MainWorkerPool-11] INFO LINSTOR/Satellite - SYSTEM - Snapshot 'back_20241009_161658' of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' registered.
2024_10_09 16:17:00.886 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Aligning /dev/linstor_group/pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000 size from 8390440 KiB to 8392704 KiB to be a multiple of extent size 4096 KiB (from Storage Pool)
2024_10_09 16:17:01.034 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' [DRBD] adjusted.
2024_10_09 16:17:01.262 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 96
2024_10_09 16:17:01.262 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 97
2024_10_09 16:17:01.301 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 97
2024_10_09 16:17:01.301 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 98
2024_10_09 16:17:02.765 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 98
2024_10_09 16:17:02.766 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 99
2024_10_09 16:17:02.774 [MainWorkerPool-1] INFO LINSTOR/Satellite - SYSTEM - Snapshot 'back_20241009_161658' of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' registered.
2024_10_09 16:17:02.774 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Aligning /dev/linstor_group/pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000 size from 8390440 KiB to 8392704 KiB to be a multiple of extent size 4096 KiB (from Storage Pool)
2024_10_09 16:17:03.012 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' [DRBD] adjusted.
2024_10_09 16:17:03.037 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: 2024/10/09 16:17:03 socat[23463] E connect(5, AF=2 10.2.0.10:12012, 16): No route to host
2024_10_09 16:17:03.092 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 40960 bytes remaining
2024_10_09 16:17:03.094 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 40960 bytes remaining
2024_10_09 16:17:03.095 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 94208 bytes remaining
2024_10_09 16:17:03.095 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 57856 bytes remaining
2024_10_09 16:17:03.099 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 94208 bytes remaining
2024_10_09 16:17:03.100 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 57856 bytes remaining
2024_10_09 16:17:03.109 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 82432 bytes remaining
2024_10_09 16:17:03.248 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 40960 bytes remaining
2024_10_09 16:17:03.249 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 40960 bytes remaining
2024_10_09 16:17:03.250 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 94208 bytes remaining
2024_10_09 16:17:03.251 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 57856 bytes remaining
2024_10_09 16:17:03.254 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 94208 bytes remaining
2024_10_09 16:17:03.256 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 57856 bytes remaining
2024_10_09 16:17:03.266 [pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000_back_20241009_161658] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 82432 bytes remaining
2024_10_09 16:17:03.282 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 99
2024_10_09 16:17:03.282 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 100
2024_10_09 16:17:03.288 [MainWorkerPool-3] INFO LINSTOR/Satellite - SYSTEM - Snapshot 'back_20241009_161658' of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' registered.
2024_10_09 16:17:03.289 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Aligning /dev/linstor_group/pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000 size from 8390440 KiB to 8392704 KiB to be a multiple of extent size 4096 KiB (from Storage Pool)
2024_10_09 16:17:03.421 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' [DRBD] adjusted.
2024_10_09 16:17:03.644 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 100
2024_10_09 16:17:03.644 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 101
2024_10_09 16:17:03.674 [MainWorkerPool-5] INFO LINSTOR/Satellite - SYSTEM - Snapshot 'back_20241009_161658' of resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' registered.
2024_10_09 16:17:03.674 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Aligning /dev/linstor_group/pvc-086a5817-d813-41fe-86d8-3fac2ae2028f_00000 size from 8390440 KiB to 8392704 KiB to be a multiple of extent size 4096 KiB (from Storage Pool)
2024_10_09 16:17:03.807 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Resource 'pvc-086a5817-d813-41fe-86d8-3fac2ae2028f' [DRBD] adjusted.
2024_10_09 16:17:04.031 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 101
2024_10_09 16:17:04.031 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 102
2024_10_09 16:47:03.682 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 102
2024_10_09 16:47:03.682 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 103
Full log: linstor-satellite.txt
No Error Report on either cluster
And now for something completely different, lol. (It's the same thing.)
We have a new XCP-ng cluster that we would like to migrate everything to. We are not migrating the k8s clusters themselves; we are creating new ones on a new RKE2 Rancher. So to migrate the applications, it would simplify things if I could move the PVCs over.
The command that fails (it fails the same way if I add --target-storage-pool xcp-sr-linstor_group_thin_device):
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 backup ship newCluster pvc-086a5817-d813-41fe-86d8-3fac2ae2028f pvc-086a5817-d813-41fe-86d8-3fac2ae2028f
ERROR:
Description:
Remote 'newCluster': Could not find suitable storage pool to receive backup
Cause:
ErrorReport id on target cluster: 66FF0E92-00000-000011
Setup remotes
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 controller list-properties
| Key             | Value                                |
| Cluster/LocalID | 941fc610-acb9-484a-9837-d2c0df8a86aa |
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 controller list-properties
| Key             | Value                                |
| Cluster/LocalID | 717be8f7-1aec-4830-9aab-cc0afba0dd3a |
linstor --controller 10.2.0.19 remote create linstor newCluster 10.2.0.10 --cluster-id 717be8f7-1aec-4830-9aab-cc0afba0dd3a
linstor --controller 10.2.0.10 remote create linstor sourceCluster 10.2.0.19 --cluster-id 941fc610-acb9-484a-9837-d2c0df8a86aa
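A quick sanity check that both remotes registered (remote list is the long form of the remote l command used elsewhere in this thread):

linstor --controller 10.2.0.19 remote list   # should list newCluster
linstor --controller 10.2.0.10 remote list   # should list sourceCluster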
Nothing interesting in any satellite logs.
Error on new cluster
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 err show 66FF0E92-00000-000011
ERROR REPORT 66FF0E92-00000-000011
============================================================
Application: LINBIT® LINSTOR
Module: Controller
Version: 1.26.1
Build ID: 12746ac9c6e7882807972c3df56e9a89eccad4e5
Build time: 2024-02-22T05:27:50+00:00
Error time: 2024-10-07 14:51:35
Node: ovbh-pprod-xen01
Thread: MainWorkerPool-3
============================================================
Reported error:
===============
Category: RuntimeException
Class name: ApiRcException
Class canonical name: com.linbit.linstor.core.apicallhandler.response.ApiRcException
Generated at: Method 'restoreBackupL2LInTransaction', Source file 'CtrlBackupRestoreApiCallHandler.java', Line #1123
Error message: Could not find suitable storage pool to receive backup
Asynchronous stage backtrace:
Error has been observed at the following site(s):
*__checkpoint ⇢ restore backup
*__checkpoint ⇢ Backupshipping L2L start receive
Original Stack Trace:
Call backtrace:
Method Native Class:Line number
restoreBackupL2LInTransaction N com.linbit.linstor.core.apicallhandler.controller.backup.CtrlBackupRestoreApiCallHandler:1123
Suppressed exception 1 of 1:
===============
Category: RuntimeException
Class name: OnAssemblyException
Class canonical name: reactor.core.publisher.FluxOnAssembly.OnAssemblyException
Generated at: Method 'restoreBackupL2LInTransaction', Source file 'CtrlBackupRestoreApiCallHandler.java', Line #1123
Error message:
Error has been observed at the following site(s):
*__checkpoint ⇢ restore backup
*__checkpoint ⇢ Backupshipping L2L start receive
Original Stack Trace:
Call backtrace:
Method Native Class:Line number
restoreBackupL2LInTransaction N com.linbit.linstor.core.apicallhandler.controller.backup.CtrlBackupRestoreApiCallHandler:1123
lambda$startReceivingInTransaction$4 N com.linbit.linstor.core.apicallhandler.controller.backup.CtrlBackupL2LDstApiCallHandler:526
doInScope N com.linbit.linstor.core.apicallhandler.ScopeRunner:149
lambda$fluxInScope$0 N com.linbit.linstor.core.apicallhandler.ScopeRunner:76
call N reactor.core.publisher.MonoCallable:72
trySubscribeScalarMap N reactor.core.publisher.FluxFlatMap:127
subscribeOrReturn N reactor.core.publisher.MonoFlatMapMany:49
subscribe N reactor.core.publisher.Flux:8759
onNext N reactor.core.publisher.MonoFlatMapMany$FlatMapManyMain:195
request N reactor.core.publisher.Operators$ScalarSubscription:2545
onSubscribe N reactor.core.publisher.MonoFlatMapMany$FlatMapManyMain:141
subscribe N reactor.core.publisher.MonoJust:55
subscribe N reactor.core.publisher.MonoDeferContextual:55
subscribe N reactor.core.publisher.Flux:8773
onNext N reactor.core.publisher.MonoFlatMapMany$FlatMapManyMain:195
onNext N reactor.core.publisher.FluxMapFuseable$MapFuseableSubscriber:129
completePossiblyEmpty N reactor.core.publisher.Operators$BaseFluxToMonoOperator:2071
onComplete N reactor.core.publisher.MonoCollect$CollectSubscriber:145
onComplete N reactor.core.publisher.MonoFlatMapMany$FlatMapManyInner:260
checkTerminated N reactor.core.publisher.FluxFlatMap$FlatMapMain:847
drainLoop N reactor.core.publisher.FluxFlatMap$FlatMapMain:609
drain N reactor.core.publisher.FluxFlatMap$FlatMapMain:589
onComplete N reactor.core.publisher.FluxFlatMap$FlatMapMain:466
checkTerminated N reactor.core.publisher.FluxFlatMap$FlatMapMain:847
drainLoop N reactor.core.publisher.FluxFlatMap$FlatMapMain:609
innerComplete N reactor.core.publisher.FluxFlatMap$FlatMapMain:895
onComplete N reactor.core.publisher.FluxFlatMap$FlatMapInner:998
onComplete N reactor.core.publisher.FluxMap$MapSubscriber:144
onComplete N reactor.core.publisher.Operators$MultiSubscriptionSubscriber:2205
onComplete N reactor.core.publisher.FluxSwitchIfEmpty$SwitchIfEmptySubscriber:85
complete N reactor.core.publisher.FluxCreate$BaseSink:460
drain N reactor.core.publisher.FluxCreate$BufferAsyncSink:805
complete N reactor.core.publisher.FluxCreate$BufferAsyncSink:753
drainLoop N reactor.core.publisher.FluxCreate$SerializedFluxSink:247
drain N reactor.core.publisher.FluxCreate$SerializedFluxSink:213
complete N reactor.core.publisher.FluxCreate$SerializedFluxSink:204
apiCallComplete N com.linbit.linstor.netcom.TcpConnectorPeer:506
handleComplete N com.linbit.linstor.proto.CommonMessageProcessor:372
handleDataMessage N com.linbit.linstor.proto.CommonMessageProcessor:296
doProcessInOrderMessage N com.linbit.linstor.proto.CommonMessageProcessor:244
lambda$doProcessMessage$4 N com.linbit.linstor.proto.CommonMessageProcessor:229
subscribe N reactor.core.publisher.FluxDefer:46
subscribe N reactor.core.publisher.Flux:8773
onNext N reactor.core.publisher.FluxFlatMap$FlatMapMain:427
drainAsync N reactor.core.publisher.FluxFlattenIterable$FlattenIterableSubscriber:453
drain N reactor.core.publisher.FluxFlattenIterable$FlattenIterableSubscriber:724
onNext N reactor.core.publisher.FluxFlattenIterable$FlattenIterableSubscriber:256
drainFused N reactor.core.publisher.SinkManyUnicast:319
drain N reactor.core.publisher.SinkManyUnicast:362
tryEmitNext N reactor.core.publisher.SinkManyUnicast:237
tryEmitNext N reactor.core.publisher.SinkManySerialized:100
processInOrder N com.linbit.linstor.netcom.TcpConnectorPeer:419
doProcessMessage N com.linbit.linstor.proto.CommonMessageProcessor:227
lambda$processMessage$2 N com.linbit.linstor.proto.CommonMessageProcessor:164
onNext N reactor.core.publisher.FluxPeek$PeekSubscriber:185
runAsync N reactor.core.publisher.FluxPublishOn$PublishOnSubscriber:440
run N reactor.core.publisher.FluxPublishOn$PublishOnSubscriber:527
call N reactor.core.scheduler.WorkerTask:84
call N reactor.core.scheduler.WorkerTask:37
run N java.util.concurrent.FutureTask:264
run N java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask:304
runWorker N java.util.concurrent.ThreadPoolExecutor:1128
run N java.util.concurrent.ThreadPoolExecutor$Worker:628
run N java.lang.Thread:829
END OF ERROR REPORT.
Info on the source cluster
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 r l | grep -e "pvc-086a5817-d813-41fe-86d8-3fac2ae2028f"
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f | ovbh-pprod-xen10 | 7117 | Unused | Ok | UpToDate | 2023-05-31 14:42:09 |
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f | ovbh-pprod-xen12 | 7117 | Unused | Ok | UpToDate | 2023-05-31 14:42:09 |
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f | ovbh-pprod-xen13 | 7117 | Unused | Ok | UpToDate | 2023-05-31 14:42:07 |
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f | ovbh-vtest-k8s02-worker01 | 7117 | InUse | Ok | Diskless | 2024-08-09 11:31:25 |
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f | ovbh-vtest-k8s02-worker03 | 7117 | Unused | Ok | Diskless | 2024-06-13 14:15:57 |
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 rd l | grep -e "pvc-086a5817-d813-41fe-86d8-3fac2ae2028f"
| pvc-086a5817-d813-41fe-86d8-3fac2ae2028f | 7117 | sc-74e1434b-b435-587e-9dea-fa067deec898 | ok |
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 rg l
| ResourceGroup                            | SelectFilter                                     | VlmNrs | Description |
| DfltRscGrp                               | PlaceCount: 2                                    |        |             |
| sc-74e1434b-b435-587e-9dea-fa067deec898 | PlaceCount: 3                                    | 0      |             |
|                                          | DisklessOnRemaining: True                        |        |             |
|                                          | LayerStack: ['DRBD', 'STORAGE']                  |        |             |
| sc-b066e430-6206-5588-a490-cc91ecef53d6 | PlaceCount: 1                                    | 0      |             |
|                                          | DisklessOnRemaining: True                        |        |             |
|                                          | LayerStack: ['DRBD', 'STORAGE']                  |        |             |
| xcp-sr-linstor_group_thin_device         | PlaceCount: 3                                    | 0      |             |
|                                          | StoragePool(s): xcp-sr-linstor_group_thin_device |        |             |
|                                          | DisklessOnRemaining: True                        |        |             |
|                                          | LayerStack: ['DRBD', 'STORAGE']                  |        |             |
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.19 sp l
| StoragePool                      | Node                      | Driver   | PoolName                  | FreeCapacity | TotalCapacity | CanSnapshots | State | SharedName                                        |
| DfltDisklessStorPool             | ovbh-pprod-xen10          | DISKLESS |                           |              |               | False        | Ok    | ovbh-pprod-xen10;DfltDisklessStorPool             |
| DfltDisklessStorPool             | ovbh-pprod-xen11          | DISKLESS |                           |              |               | False        | Ok    | ovbh-pprod-xen11;DfltDisklessStorPool             |
| DfltDisklessStorPool             | ovbh-pprod-xen12          | DISKLESS |                           |              |               | False        | Ok    | ovbh-pprod-xen12;DfltDisklessStorPool             |
| DfltDisklessStorPool             | ovbh-pprod-xen13          | DISKLESS |                           |              |               | False        | Ok    | ovbh-pprod-xen13;DfltDisklessStorPool             |
| DfltDisklessStorPool             | ovbh-vprod-k8s04-worker01 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vprod-k8s04-worker01;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vprod-k8s04-worker02 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vprod-k8s04-worker02;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vprod-k8s04-worker03 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vprod-k8s04-worker03;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vprod-k8s04-worker07 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vprod-k8s04-worker07;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vtest-k8s02-worker01 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vtest-k8s02-worker01;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vtest-k8s02-worker02 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vtest-k8s02-worker02;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vtest-k8s02-worker03 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vtest-k8s02-worker03;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vtest-k8s02-worker04 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vtest-k8s02-worker04;DfltDisklessStorPool    |
| xcp-sr-linstor_group_thin_device | ovbh-pprod-xen10          | LVM_THIN | linstor_group/thin_device | 2.48 TiB     | 3.49 TiB      | True         | Ok    | ovbh-pprod-xen10;xcp-sr-linstor_group_thin_device |
| xcp-sr-linstor_group_thin_device | ovbh-pprod-xen11          | LVM_THIN | linstor_group/thin_device | 2.42 TiB     | 3.49 TiB      | True         | Ok    | ovbh-pprod-xen11;xcp-sr-linstor_group_thin_device |
| xcp-sr-linstor_group_thin_device | ovbh-pprod-xen12          | LVM_THIN | linstor_group/thin_device | 2.83 TiB     | 3.49 TiB      | True         | Ok    | ovbh-pprod-xen12;xcp-sr-linstor_group_thin_device |
| xcp-sr-linstor_group_thin_device | ovbh-pprod-xen13          | LVM_THIN | linstor_group/thin_device | 4.12 TiB     | 4.99 TiB      | True         | Ok    | ovbh-pprod-xen13;xcp-sr-linstor_group_thin_device |
On the new cluster
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 rg l
| ResourceGroup                            | SelectFilter                                     | VlmNrs | Description |
| DfltRscGrp                               | PlaceCount: 2                                    |        |             |
| sc-74e1434b-b435-587e-9dea-fa067deec898 | PlaceCount: 3                                    | 0      |             |
|                                          | DisklessOnRemaining: True                        |        |             |
|                                          | LayerStack: ['DRBD', 'STORAGE']                  |        |             |
| xcp-ha-linstor_group_thin_device         | PlaceCount: 3                                    | 0      |             |
|                                          | StoragePool(s): xcp-sr-linstor_group_thin_device |        |             |
|                                          | DisklessOnRemaining: False                       |        |             |
| xcp-sr-linstor_group_thin_device         | PlaceCount: 3                                    | 0      |             |
|                                          | StoragePool(s): xcp-sr-linstor_group_thin_device |        |             |
|                                          | DisklessOnRemaining: False                       |        |             |
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 sp l
| StoragePool                      | Node                      | Driver   | PoolName                  | FreeCapacity | TotalCapacity | CanSnapshots | State | SharedName                                        |
| DfltDisklessStorPool             | ovbh-pprod-xen01          | DISKLESS |                           |              |               | False        | Ok    | ovbh-pprod-xen01;DfltDisklessStorPool             |
| DfltDisklessStorPool             | ovbh-pprod-xen02          | DISKLESS |                           |              |               | False        | Ok    | ovbh-pprod-xen02;DfltDisklessStorPool             |
| DfltDisklessStorPool             | ovbh-pprod-xen03          | DISKLESS |                           |              |               | False        | Ok    | ovbh-pprod-xen03;DfltDisklessStorPool             |
| DfltDisklessStorPool             | ovbh-vprod-rancher01      | DISKLESS |                           |              |               | False        | Ok    | ovbh-vprod-rancher01;DfltDisklessStorPool         |
| DfltDisklessStorPool             | ovbh-vprod-rancher02      | DISKLESS |                           |              |               | False        | Ok    | ovbh-vprod-rancher02;DfltDisklessStorPool         |
| DfltDisklessStorPool             | ovbh-vprod-rancher03      | DISKLESS |                           |              |               | False        | Ok    | ovbh-vprod-rancher03;DfltDisklessStorPool         |
| DfltDisklessStorPool             | ovbh-vtest-k8s01-worker01 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vtest-k8s01-worker01;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vtest-k8s01-worker02 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vtest-k8s01-worker02;DfltDisklessStorPool    |
| DfltDisklessStorPool             | ovbh-vtest-k8s01-worker03 | DISKLESS |                           |              |               | False        | Ok    | ovbh-vtest-k8s01-worker03;DfltDisklessStorPool    |
| xcp-sr-linstor_group_thin_device | ovbh-pprod-xen01          | LVM_THIN | linstor_group/thin_device | 13.75 TiB    | 13.97 TiB     | True         | Ok    | ovbh-pprod-xen01;xcp-sr-linstor_group_thin_device |
| xcp-sr-linstor_group_thin_device | ovbh-pprod-xen02          | LVM_THIN | linstor_group/thin_device | 13.75 TiB    | 13.97 TiB     | True         | Ok    | ovbh-pprod-xen02;xcp-sr-linstor_group_thin_device |
| xcp-sr-linstor_group_thin_device | ovbh-pprod-xen03          | LVM_THIN | linstor_group/thin_device | 13.75 TiB    | 13.97 TiB     | True         | Ok    | ovbh-pprod-xen03;xcp-sr-linstor_group_thin_device |
@BHellman Appreciate the weigh-in, and the time from your dev.
Ok yeah, I thought I was having a hallucination lol. v1.0.1 was 100% working when I installed it at the time of my posting, and it was failing today. Restarting all the satellites got it working again, though I assume it will break again.
When it actually works, I can see the PVC in the S3 remote.
Here is a scatter of commands and outputs. I restarted the satellites partway through, so it may be difficult to read, but I thought it would be better than nothing.
commands-and-outputs.txt
xen01-linstor-Satellite.txt
xen02-linstor-Satellite.txt
xen03-linstor-Satellite.txt
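For reference, this is roughly how I bounce the satellites when shipping gets stuck. A minimal sketch; it assumes the linstor-satellite systemd unit name used by the XCP-ng packages:
# run on each XCP-ng host that runs a satellite
systemctl restart linstor-satellite
# then confirm all nodes reconnect before retrying the backup
linstor --controller 10.2.0.10 node list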
Ok great, I manually built v1.0.1, and it works just like the package I got online. That means what I am doing is working and the build process is correct.
The bad news is there is a breaking change in v1.1.2, and I think I am potentially SOL.
I am going to build and test v1.1.0 and v1.1.1 to see which ones work. NVM, v1.1.0 is also broken.
So the change that breaks it is in here: https://github.com/LINBIT/thin-send-recv/compare/6b7c9002cd7716ff6ef93f5a5e8908032b81f853...e44f566ea0c975e2baa475868ebc176065a5b22d
v1.0.1 might just be the version that works with this version of LINSTOR, and whenever LINSTOR gets updated it might call for a newer version of thin-send-recv.
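In case it helps anyone reproduce this, the rough flow I use to build and test a given tag looks like the following. This is only a sketch; the actual packaging steps are in the thin-send-recv.txt doc attached below:
git clone https://github.com/LINBIT/thin-send-recv
cd thin-send-recv
git checkout v1.1.1   # swap in v1.0.1 / v1.1.0 / v1.1.2 to bisect
# build and package per the attached doc, then install the resulting rpm on each host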
Ok, so, it turns out this is because of the thin-send-recv package I built from https://github.com/LINBIT/thin-send-recv/tree/master
I just swapped out the version I built for the last one I was able to get online, and it works.
The last version I was able to get from any repository before they went 403 was thin-send-recv-1.0.1-1.x86_64.rpm.txt, which I downloaded from https://piraeus.daocloud.io/linbit/rpms/7/x86_64/thin-send-recv-1.0.1-1.x86_64.rpm. FYI, https://packages.linbit.com/yum/sles12-sp2/drbd-9.0/x86_64/Packages/ returns 403s too, so no point in looking for it there if they have it hosted.
I built thin-send-recv-1.1.2-1.xcpng8.2.x86_64.rpm.txt using this doc I put together: thin-send-recv.txt. But the package I built results in the error posted previously.
So I am a bit at a loss. I want to be able to use Velero to back up PVs that are not managed by an operator with backup capabilities, but I do not want to be stuck with an old version I cannot update.
Any advice would be greatly appreciated!
Hello all!
I have an issue with backing up to S3. I am hoping someone can point out the mistake I am making.
Our xcp-ng hosts are all up to date.
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 remote l
╭───────────────────────────────────────────────────────────────────────────╮
┊ Name                 ┊ Type ┊ Info                                        ┊
╞═══════════════════════════════════════════════════════════════════════════╡
┊ linbit-velero-backup ┊ S3   ┊ us-east-1.s3.wasabisys.com/velero-preprod   ┊
╰───────────────────────────────────────────────────────────────────────────╯
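For context, the remote above was registered with something along these lines. A sketch from memory, with placeholder credentials; check linstor remote create s3 -h for the exact argument order:
linstor --controller 10.2.0.10 remote create s3 linbit-velero-backup \
  us-east-1.s3.wasabisys.com velero-preprod us-east-1 <access-key> <secret-key>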
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 backup create linbit-velero-backup pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d
SUCCESS:
Suspended IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-vtest-k8s01-worker02' for snapshot
SUCCESS:
Suspended IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen02' for snapshot
SUCCESS:
Suspended IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen03' for snapshot
SUCCESS:
Suspended IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen01' for snapshot
SUCCESS:
Took snapshot of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-vtest-k8s01-worker02'
SUCCESS:
Took snapshot of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen02'
SUCCESS:
Took snapshot of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen03'
SUCCESS:
Took snapshot of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen01'
SUCCESS:
Resumed IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-vtest-k8s01-worker02' after snapshot
SUCCESS:
Resumed IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen01' after snapshot
SUCCESS:
Resumed IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen02' after snapshot
SUCCESS:
Resumed IO of '[pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d]' on 'ovbh-pprod-xen03' after snapshot
INFO:
Generated snapshot name for backup of resource pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d to remote linbit-velero-backup
INFO:
Shipping of resource pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d to remote linbit-velero-backup in progress.
SUCCESS:
Started shipping of resource 'pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d'
SUCCESS:
Started shipping of resource 'pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d'
SUCCESS:
Started shipping of resource 'pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d'
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 snapshot l
╭─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
┊ ResourceName                             ┊ SnapshotName         ┊ NodeNames                                            ┊ Volumes   ┊ CreatedOn           ┊ State    ┊
╞═════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╡
┊ pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d ┊ back_20241002_191139 ┊ ovbh-pprod-xen01, ovbh-pprod-xen02, ovbh-pprod-xen03 ┊ 0: 50 GiB ┊ 2024-10-02 16:11:40 ┊ Shipping ┊
╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 backup list linbit-velero-backup
╭─────────────────────────────────────────────────────────╮
┊ Resource ┊ Snapshot ┊ Finished at ┊ Based On ┊ Status ┊
╞═════════════════════════════════════════════════════════╡
╰─────────────────────────────────────────────────────────╯
jonathon@jonathon-framework:~$ linstor --controller 10.2.0.10 snapshot l
╭───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
┊ ResourceName                             ┊ SnapshotName         ┊ NodeNames                                            ┊ Volumes   ┊ CreatedOn           ┊ State      ┊
╞═══════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╡
┊ pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d ┊ back_20241002_191139 ┊ ovbh-pprod-xen01, ovbh-pprod-xen02, ovbh-pprod-xen03 ┊ 0: 50 GiB ┊ 2024-10-02 16:11:40 ┊ Successful ┊
╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
Nothing shows up on S3.
And after enabling logs by modifying /usr/share/linstor-server/lib/conf/logback.xml, I see the following:
[19:15 ovbh-pprod-xen01 ~]# tail /var/log/linstor-satellite/linstor-Satellite.log -n 20
2024_10_02 19:11:41.511 [shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 40960 bytes remaining
2024_10_02 19:11:41.512 [shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 40960 bytes remaining
2024_10_02 19:11:41.513 [shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 40960 bytes remaining
2024_10_02 19:11:41.516 [shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139] WARN LINSTOR/Satellite - SYSTEM - stdErr: Device read short 82432 bytes remaining
2024_10_02 19:11:41.543 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 42
2024_10_02 19:11:41.543 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 43
2024_10_02 19:11:41.552 [MainWorkerPool-5] INFO LINSTOR/Satellite - SYSTEM - Snapshot 'back_20241002_191139' of resource 'pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d' registered.
2024_10_02 19:11:41.553 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Aligning /dev/linstor_group/pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000 size from 52440040 KiB to 52441088 KiB to be a multiple of extent size 4096 KiB (from Storage Pool)
2024_10_02 19:11:41.615 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Resource 'pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d' [DRBD] adjusted.
2024_10_02 19:11:41.781 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 43
2024_10_02 19:11:41.781 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 44
2024_10_02 19:11:47.220 [shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139] WARN LINSTOR/Satellite - SYSTEM - stdErr: Incomplete copy_data, 4194304 bytes missing.
2024_10_02 19:11:47.295 [shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139] WARN LINSTOR/Satellite - SYSTEM - Exception occurred while checking for support of requester-pays on remote linbit-velero-backup. Defaulting to false
2024_10_02 19:11:47.307 [MainWorkerPool-7] INFO LINSTOR/Satellite - SYSTEM - Snapshot 'back_20241002_191139' of resource 'pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d' registered.
2024_10_02 19:11:47.309 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Aligning /dev/linstor_group/pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000 size from 52440040 KiB to 52441088 KiB to be a multiple of extent size 4096 KiB (from Storage Pool)
2024_10_02 19:11:47.312 [shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139] ERROR LINSTOR/Satellite - SYSTEM - [Report number 66FDD1AE-3AE91-000000]
2024_10_02 19:11:47.398 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Resource 'pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d' [DRBD] adjusted.
2024_10_02 19:11:47.561 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - End DeviceManager cycle 44
2024_10_02 19:11:47.561 [DeviceManager] INFO LINSTOR/Satellite - SYSTEM - Begin DeviceManager cycle 45
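For the record, that logging was enabled by raising the root log level in the logback.xml mentioned above and restarting the satellite. Roughly like this (a sketch; it assumes the stock file ships with the root logger at INFO, so adjust the match to whatever your file actually contains):
# assumes the default <root level="INFO"> element; verify before running
sed -i 's/<root level="INFO">/<root level="DEBUG">/' /usr/share/linstor-server/lib/conf/logback.xml
systemctl restart linstor-satellite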
The error:
[19:12 ovbh-pprod-xen01 ~]# cat /var/log/linstor-satellite/ErrorReport-66FDD1AE-3AE91-000000.log
ERROR REPORT 66FDD1AE-3AE91-000000
============================================================
Application: LINBITĀ® LINSTOR
Module: Satellite
Version: 1.26.1
Build ID: 12746ac9c6e7882807972c3df56e9a89eccad4e5
Build time: 2024-02-22T05:27:50+00:00
Error time: 2024-10-02 19:11:47
Node: ovbh-pprod-xen01
Thread: shipping_pvc-7746af6f-d37e-4c5d-9f44-9616f2f9b33d_00000_back_20241002_191139
============================================================
Reported error:
===============
Category: RuntimeException
Class name: AbortedException
Class canonical name: com.amazonaws.AbortedException
Generated at: Method 'handleInterruptedException', Source file 'AmazonHttpClient.java', Line #880
Error message:
Call backtrace:
Method Native Class:Line number
handleInterruptedException N com.amazonaws.http.AmazonHttpClient$RequestExecutor:880
execute N com.amazonaws.http.AmazonHttpClient$RequestExecutor:757
access$500 N com.amazonaws.http.AmazonHttpClient$RequestExecutor:715
execute N com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl:697
execute N com.amazonaws.http.AmazonHttpClient:561
execute N com.amazonaws.http.AmazonHttpClient:541
invoke N com.amazonaws.services.s3.AmazonS3Client:5516
invoke N com.amazonaws.services.s3.AmazonS3Client:5463
abortMultipartUpload N com.amazonaws.services.s3.AmazonS3Client:3620
abortMultipart N com.linbit.linstor.api.BackupToS3:199
threadFinished N com.linbit.linstor.backupshipping.BackupShippingS3Daemon:320
run N com.linbit.linstor.backupshipping.BackupShippingS3Daemon:298
run N java.lang.Thread:829
Caused by:
==========
Category: Exception
Class name: SdkInterruptedException
Class canonical name: com.amazonaws.http.timers.client.SdkInterruptedException
Generated at: Method 'checkInterrupted', Source file 'AmazonHttpClient.java', Line #935
Call backtrace:
Method Native Class:Line number
checkInterrupted N com.amazonaws.http.AmazonHttpClient$RequestExecutor:935
checkInterrupted N com.amazonaws.http.AmazonHttpClient$RequestExecutor:921
executeHelper N com.amazonaws.http.AmazonHttpClient$RequestExecutor:1115
doExecute N com.amazonaws.http.AmazonHttpClient$RequestExecutor:814
executeWithTimer N com.amazonaws.http.AmazonHttpClient$RequestExecutor:781
execute N com.amazonaws.http.AmazonHttpClient$RequestExecutor:755
access$500 N com.amazonaws.http.AmazonHttpClient$RequestExecutor:715
execute N com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl:697
execute N com.amazonaws.http.AmazonHttpClient:561
execute N com.amazonaws.http.AmazonHttpClient:541
invoke N com.amazonaws.services.s3.AmazonS3Client:5516
invoke N com.amazonaws.services.s3.AmazonS3Client:5463
abortMultipartUpload N com.amazonaws.services.s3.AmazonS3Client:3620
abortMultipart N com.linbit.linstor.api.BackupToS3:199
threadFinished N com.linbit.linstor.backupshipping.BackupShippingS3Daemon:320
run N com.linbit.linstor.backupshipping.BackupShippingS3Daemon:298
run N java.lang.Thread:829
END OF ERROR REPORT.
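Side note: the same report can also be pulled through the LINSTOR client instead of cat'ing the file on the host:
linstor --controller 10.2.0.10 error-reports list
linstor --controller 10.2.0.10 error-reports show 66FDD1AE-3AE91-000000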
Attempting to migrate an existing disk into LINSTOR fails. The following error is printed after the task completes.
Linstor version
[15:55 ovbh-pprod-xen10 ~]# linstor controller version
linstor controller 1.24.2; GIT-hash: adb19ca96a07039401023410c1ea116f09929295
[15:55 ovbh-pprod-xen10 ~]# linstor --version
linstor-client 1.19.0; GIT-hash: 500970a65717de3eeba002ee32ceb668ebd654b1
Error:
vdi.migrate
{
"id": "5ba9251a-42ef-489e-bcdb-6a0b21bd8e44",
"sr_id": "a8b860a9-5246-0dd2-8b7f-4806604f219a"
}
{
"code": "SR_BACKEND_FAILURE_202",
"params": [
"",
"General backend error [opterr=Failed to refresh VDI 6733dbf5-6ff9-4ca9-9986-e2f0173b5fbc]",
""
],
"task": {
"uuid": "75204992-0f80-189e-4679-7ed709398371",
"name_label": "Async.VDI.pool_migrate",
"name_description": "",
"allowed_operations": [],
"current_operations": {},
"created": "20240305T20:25:27Z",
"finished": "20240305T20:31:44Z",
"status": "failure",
"resident_on": "OpaqueRef:b5e3bf1f-2c0e-4272-85f5-7cde69a8d98e",
"progress": 1,
"type": "<none/>",
"result": "",
"error_info": [
"SR_BACKEND_FAILURE_202",
"",
"General backend error [opterr=Failed to refresh VDI 6733dbf5-6ff9-4ca9-9986-e2f0173b5fbc]",
""
],
"other_config": {},
"subtask_of": "OpaqueRef:NULL",
"subtasks": [],
"backtrace": "(((process xapi)(filename ocaml/xapi-client/client.ml)(line 7))((process xapi)(filename ocaml/xapi-client/client.ml)(line 19))((process xapi)(filename ocaml/xapi-client/client.ml)(line 12359))((process xapi)(filename lib/xapi-stdext-pervasives/pervasiveext.ml)(line 24))((process xapi)(filename lib/xapi-stdext-pervasives/pervasiveext.ml)(line 35))((process xapi)(filename ocaml/xapi/message_forwarding.ml)(line 134))((process xapi)(filename lib/xapi-stdext-pervasives/pervasiveext.ml)(line 24))((process xapi)(filename lib/xapi-stdext-pervasives/pervasiveext.ml)(line 35))((process xapi)(filename lib/xapi-stdext-pervasives/pervasiveext.ml)(line 24))((process xapi)(filename ocaml/xapi/rbac.ml)(line 205))((process xapi)(filename ocaml/xapi/server_helpers.ml)(line 95)))"
},
"message": "SR_BACKEND_FAILURE_202(, General backend error [opterr=Failed to refresh VDI 6733dbf5-6ff9-4ca9-9986-e2f0173b5fbc], )",
"name": "XapiError",
"stack": "XapiError: SR_BACKEND_FAILURE_202(, General backend error [opterr=Failed to refresh VDI 6733dbf5-6ff9-4ca9-9986-e2f0173b5fbc], )
at Function.wrap (file:///opt/xen-orchestra/packages/xen-api/_XapiError.mjs:16:12)
at default (file:///opt/xen-orchestra/packages/xen-api/_getTaskResult.mjs:11:29)
at Xapi._addRecordToCache (file:///opt/xen-orchestra/packages/xen-api/index.mjs:1006:24)
at file:///opt/xen-orchestra/packages/xen-api/index.mjs:1040:14
at Array.forEach (<anonymous>)
at Xapi._processEvents (file:///opt/xen-orchestra/packages/xen-api/index.mjs:1030:12)
at Xapi._watchEvents (file:///opt/xen-orchestra/packages/xen-api/index.mjs:1203:14)
at runNextTicks (node:internal/process/task_queues:60:5)
at processImmediate (node:internal/timers:447:9)
at process.callbackTrampoline (node:internal/async_hooks:128:17)"
}
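The migration itself was kicked off from Xen Orchestra; from a shell, the equivalent call would look something like this (a sketch, assuming xo-cli is installed and registered against the XO instance):
xo-cli vdi.migrate id=5ba9251a-42ef-489e-bcdb-6a0b21bd8e44 sr_id=a8b860a9-5246-0dd2-8b7f-4806604f219a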
It says I am three commits behind, even though I had just checked for updates.
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 6477 100 6477 0 0 72775 0 --:--:-- --:--:-- --:--:-- 73602
installed : v18.19.1 (with npm 10.2.4)
Stopping xo-server...
Warning: apt-key output should not be parsed (stdout is not a terminal)
Checking for Yarn package...
Checking for Yarn update...
Reading package lists...
Building dependency tree...
Reading state information...
yarn is already the newest version (1.22.19-1).
0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded.
Checking for missing dependencies...
Checking for Repo change...
Checking xen-orchestra...
Current branch master
Current version 5.138.0 / 5.139.0
Current commit 0b00c1a6547e2e07feb49d3cd39cb1ffed3ed213 2024-03-04 14:42:06 +0100
3 updates available
Updating from source...
No local changes to save
No stash entries found.
Updating 0b00c1a65..37c2cfd99
Fast-forward
CHANGELOG.md | 13 +++++++++++++
CHANGELOG.unreleased.md | 7 -------
packages/xo-server/package.json | 2 +-
packages/xo-server/src/api/xostor.mjs | 3 ++-
packages/xo-web/package.json | 2 +-
5 files changed, 17 insertions(+), 10 deletions(-)
Clearing directories...
Installing...
yarn install v1.22.19
[1/5] Validating package.json...
[2/5] Resolving packages...
[3/5] Fetching packages...
[4/5] Linking dependencies...
warning "@commitlint/cli > @commitlint/load > cosmiconfig-typescript-loader@5.0.0" has unmet peer dependency "@types/node@*".
warning "@commitlint/cli > @commitlint/load > cosmiconfig-typescript-loader@5.0.0" has unmet peer dependency "typescript@>=4".
warning "@vue/eslint-config-typescript > @typescript-eslint/eslint-plugin > ts-api-utils@1.2.1" has unmet peer dependency "typescript@>=4.2.0".
warning "workspace-aggregator-8dfe1888-d206-4158-b73c-c08f391a75fb > @vates/node-vsphere-soap > soap@1.0.0" has incorrect peer dependency "axios@^0.27.2".
warning "workspace-aggregator-8dfe1888-d206-4158-b73c-c08f391a75fb > @vates/event-listeners-manager > tap > @tapjs/test > @isaacs/ts-node-temp-fork-for-pr-2009@10.9.5" has unmet peer dependency "@types/node@*".
warning "workspace-aggregator-8dfe1888-d206-4158-b73c-c08f391a75fb > @vates/event-listeners-manager > tap > @tapjs/asserts > tcompare > react-element-to-jsx-string@15.0.0" has unmet peer dependency "react@^0.14.8 || ^15.0.1 || ^16.0.0 || ^17.0.1 || ^18.0.0".
warning "workspace-aggregator-8dfe1888-d206-4158-b73c-c08f391a75fb > @vates/event-listeners-manager > tap > @tapjs/asserts > tcompare > react-element-to-jsx-string@15.0.0" has unmet peer dependency "react-dom@^0.14.8 || ^15.0.1 || ^16.0.0 || ^17.0.1 || ^18.0.0".
warning Workspaces can only be enabled in private projects.
[5/5] Building fresh packages...
$ husky install
husky - Git hooks installed
Done in 26.21s.
yarn run v1.22.19
$ TURBO_TELEMETRY_DISABLED=1 turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web
ā¢ Packages in scope: xo-server, xo-server-audit, xo-server-auth-github, xo-server-auth-google, xo-server-auth-ldap, xo-server-auth-oidc, xo-server-auth-saml, xo-server-backup-reports, xo-server-load-balancer, xo-server-netbox, xo-server-perf-alert, xo-server-sdn-controller, xo-server-test, xo-server-test-plugin, xo-server-transport-email, xo-server-transport-icinga2, xo-server-transport-nagios, xo-server-transport-slack, xo-server-transport-xmpp, xo-server-usage-report, xo-server-web-hooks, xo-web
ā¢ Running build in 22 packages
ā¢ Remote caching disabled
Tasks: 25 successful, 25 total
Cached: 23 cached, 25 total
Time: 2m2.266s
Done in 122.81s.
Updated version 5.138.1 / 5.140.0
Updated commit 37c2cfd99e004d669950e70d1da61d09a4596e71 2024-03-05 17:50:39 +0100
Checking plugins...
Ignoring xo-server-test plugin
Cleanup plugins...
Restarting xo-server...
The problem was the yum cache. If I ran yum update right after yum update xcp-ng-release-linstor, it would still fail. To get it working right away I did the following:
yum update xcp-ng-release-linstor
yum clean all
yum update
Looks like the package is listed twice?
[12:32 ovbh-pprod-xen10 ~]# yum update xcp-ng-linstor
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Excluding mirror: updates.xcp-ng.org
* xcp-ng-base: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-updates: mirrors.xcp-ng.org
Resolving Dependencies
--> Running transaction check
---> Package xcp-ng-linstor.noarch 0:1.0-1.xcpng8.2 will be updated
---> Package xcp-ng-linstor.noarch 0:1.1-3.xcpng8.2 will be an update
--> Processing Dependency: sm-linstor for package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch
--> Finished Dependency Resolution
Error: Package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch (xcp-ng-updates)
Requires: sm-linstor
You could try using --skip-broken to work around the problem
You could try running: rpm -Va --nofiles --nodigest
[12:34 ovbh-pprod-xen10 ~]# yum list
[...]
xcp-networkd.x86_64 0.56.2-8.xcpng8.2 xcp-ng-updates
xcp-networkd-debuginfo.x86_64 0.56.2-8.xcpng8.2 xcp-ng-updates
xcp-ng-generic-lib-devel.x86_64 1.1.1-3.xcpng8.2 xcp-ng-base
xcp-ng-linstor.noarch 1.1-3.xcpng8.2 xcp-ng-updates
xcp-ng-release.x86_64 8.2.1-10 xcp-ng-updates
xcp-ng-release-config.x86_64 8.2.1-10 xcp-ng-updates
[...]
Seems like the old package is just stuck there as a duplicate version.
[12:34 ovbh-pprod-xen10 ~]# yum remove xcp-ng-linstor.noarch 0:1.0-1
Loaded plugins: fastestmirror
No Match for argument: 0:1.0-1
Resolving Dependencies
--> Running transaction check
---> Package xcp-ng-linstor.noarch 0:1.0-1.xcpng8.2 will be erased
--> Finished Dependency Resolution
Dependencies Resolved
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Package Arch Version Repository Size
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Removing:
xcp-ng-linstor noarch 1.0-1.xcpng8.2 @xcp-ng-linstor 0.0
Transaction Summary
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Remove 1 Package
Installed size: 0
Is this ok [y/N]:
Unfortunately, it does not look like that works, unless I am doing something wrong.
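Before forcing anything, this is how I would verify whether there really are two installed copies, or just one installed plus a newer one available in the repos (a sketch):
rpm -q xcp-ng-linstor                 # lists every installed copy of the package
yum list installed xcp-ng-linstor     # what yum thinks is installed
yum list available xcp-ng-linstor     # what the enabled repos offer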
[12:26 ovbh-pprod-xen10 ~]# yum update xcp-ng-release-linstor
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Excluding mirror: updates.xcp-ng.org
* xcp-ng-base: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-linstor: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-updates: mirrors.xcp-ng.org
Resolving Dependencies
--> Running transaction check
---> Package xcp-ng-release-linstor.noarch 0:1.2-1.xcpng8.2 will be updated
---> Package xcp-ng-release-linstor.noarch 0:1.3-1.xcpng8.2 will be an update
--> Finished Dependency Resolution
Dependencies Resolved
==========================================================================================================================================================================================================================================================================================
Package Arch Version Repository Size
==========================================================================================================================================================================================================================================================================================
Updating:
xcp-ng-release-linstor noarch 1.3-1.xcpng8.2 xcp-ng-updates 4.0 k
Transaction Summary
==========================================================================================================================================================================================================================================================================================
Upgrade 1 Package
Total download size: 4.0 k
Is this ok [y/d/N]: y
Downloading packages:
Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
xcp-ng-release-linstor-1.3-1.xcpng8.2.noarch.rpm | 4.0 kB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Updating : xcp-ng-release-linstor-1.3-1.xcpng8.2.noarch 1/2
Cleanup : xcp-ng-release-linstor-1.2-1.xcpng8.2.noarch 2/2
Verifying : xcp-ng-release-linstor-1.3-1.xcpng8.2.noarch 1/2
Verifying : xcp-ng-release-linstor-1.2-1.xcpng8.2.noarch 2/2
Updated:
xcp-ng-release-linstor.noarch 0:1.3-1.xcpng8.2
Complete!
[12:26 ovbh-pprod-xen10 ~]# yum update
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Excluding mirror: updates.xcp-ng.org
* xcp-ng-base: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-updates: mirrors.xcp-ng.org
Resolving Dependencies
--> Running transaction check
---> Package blktap.x86_64 0:3.37.4-1.0.1.0.linstor.1.xcpng8.2 will be updated
---> Package blktap.x86_64 0:3.37.4-2.1.xcpng8.2 will be an update
---> Package device-mapper-multipath.x86_64 0:0.4.9-119.xs+1.2.xcpng8.2 will be updated
---> Package device-mapper-multipath.x86_64 0:0.4.9-136.xcpng8.2 will be an update
---> Package device-mapper-multipath-libs.x86_64 0:0.4.9-119.xs+1.2.xcpng8.2 will be updated
---> Package device-mapper-multipath-libs.x86_64 0:0.4.9-136.xcpng8.2 will be an update
---> Package e2fsprogs.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package e2fsprogs.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
--> Processing Dependency: libfuse.so.2(FUSE_2.5)(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
--> Processing Dependency: libfuse.so.2(FUSE_2.6)(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
--> Processing Dependency: libfuse.so.2(FUSE_2.8)(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
--> Processing Dependency: libfuse.so.2()(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
---> Package e2fsprogs-libs.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package e2fsprogs-libs.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
---> Package forkexecd.x86_64 0:1.18.1-1.1.xcpng8.2 will be updated
---> Package forkexecd.x86_64 0:1.18.3-3.1.xcpng8.2 will be an update
---> Package gpumon.x86_64 0:0.18.0-4.2.xcpng8.2 will be updated
---> Package gpumon.x86_64 0:0.18.0-11.2.xcpng8.2 will be an update
---> Package grub.x86_64 1:2.02-3.1.0.xcpng8.2 will be updated
---> Package grub.x86_64 1:2.02-3.2.0.xcpng8.2 will be an update
---> Package grub-efi.x86_64 1:2.02-3.1.0.xcpng8.2 will be updated
---> Package grub-efi.x86_64 1:2.02-3.2.0.xcpng8.2 will be an update
---> Package grub-tools.x86_64 1:2.02-3.1.0.xcpng8.2 will be updated
---> Package grub-tools.x86_64 1:2.02-3.2.0.xcpng8.2 will be an update
---> Package guest-templates-json.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package guest-templates-json-data-linux.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json-data-linux.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package guest-templates-json-data-other.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json-data-other.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package guest-templates-json-data-windows.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json-data-windows.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package http-nbd-transfer.x86_64 0:1.2.0-1.xcpng8.2 will be updated
---> Package http-nbd-transfer.x86_64 0:1.3.0-1.xcpng8.2 will be an update
---> Package irqbalance.x86_64 3:1.0.7-11.xcpng8.2 will be updated
---> Package irqbalance.x86_64 3:1.0.7-16.xcpng8.2 will be an update
---> Package kernel.x86_64 0:4.19.19-7.0.15.1.xcpng8.2 will be updated
---> Package kernel.x86_64 0:4.19.19-7.0.23.1.xcpng8.2 will be an update
---> Package kpartx.x86_64 0:0.4.9-119.xs+1.2.xcpng8.2 will be updated
---> Package kpartx.x86_64 0:0.4.9-136.xcpng8.2 will be an update
---> Package libcom_err.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package libcom_err.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
---> Package libss.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package libss.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
---> Package linux-firmware.noarch 0:20190314-5.1.xcpng8.2 will be updated
---> Package linux-firmware.noarch 0:20190314-10.2.xcpng8.2 will be an update
---> Package lldpad.x86_64 0:1.0.1-3.git036e314.xcpng8.2 will be updated
---> Package lldpad.x86_64 0:1.0.1-10.xcpng8.2 will be an update
---> Package message-switch.x86_64 0:1.23.2-3.2.xcpng8.2 will be updated
---> Package message-switch.x86_64 0:1.23.2-10.1.xcpng8.2 will be an update
---> Package microcode_ctl.x86_64 2:2.1-26.xs23.1.xcpng8.2 will be updated
---> Package microcode_ctl.x86_64 2:2.1-26.xs26.2.xcpng8.2 will be an update
---> Package nbd.x86_64 0:3.14-2.el7 will be updated
---> Package nbd.x86_64 0:3.24-1.xcpng8.2 will be an update
---> Package qemu.x86_64 2:4.2.1-4.6.2.1.xcpng8.2 will be updated
---> Package qemu.x86_64 2:4.2.1-4.6.3.1.xcpng8.2 will be an update
---> Package rrd2csv.x86_64 0:1.2.5-7.1.xcpng8.2 will be updated
---> Package rrd2csv.x86_64 0:1.2.6-8.1.xcpng8.2 will be an update
---> Package rrdd-plugins.x86_64 0:1.10.8-5.1.xcpng8.2 will be updated
---> Package rrdd-plugins.x86_64 0:1.10.9-5.1.xcpng8.2 will be an update
---> Package sm.x86_64 0:2.30.7-1.3.0.linstor.7.xcpng8.2 will be updated
---> Package sm.x86_64 0:2.30.8-7.1.xcpng8.2 will be an update
---> Package sm-cli.x86_64 0:0.23.0-7.xcpng8.2 will be updated
---> Package sm-cli.x86_64 0:0.23.0-54.1.xcpng8.2 will be an update
---> Package sm-rawhba.x86_64 0:2.30.7-1.3.0.linstor.7.xcpng8.2 will be updated
---> Package sm-rawhba.x86_64 0:2.30.8-7.1.xcpng8.2 will be an update
---> Package squeezed.x86_64 0:0.27.0-5.xcpng8.2 will be updated
---> Package squeezed.x86_64 0:0.27.0-11.1.xcpng8.2 will be an update
---> Package tzdata.noarch 0:2022a-1.el7 will be updated
---> Package tzdata.noarch 0:2023c-1.el7 will be an update
---> Package tzdata-java.noarch 0:2022a-1.el7 will be updated
---> Package tzdata-java.noarch 0:2023c-1.el7 will be an update
---> Package varstored-guard.x86_64 0:0.6.2-1.xcpng8.2 will be updated
---> Package varstored-guard.x86_64 0:0.6.2-8.xcpng8.2 will be an update
---> Package vendor-drivers.x86_64 0:1.0.2-1.3.xcpng8.2 will be updated
---> Package vendor-drivers.x86_64 0:1.0.2-1.6.xcpng8.2 will be an update
--> Processing Dependency: mpi3mr-module for package: vendor-drivers-1.0.2-1.6.xcpng8.2.x86_64
--> Processing Dependency: r8125-module for package: vendor-drivers-1.0.2-1.6.xcpng8.2.x86_64
--> Processing Dependency: igc-module for package: vendor-drivers-1.0.2-1.6.xcpng8.2.x86_64
---> Package vhd-tool.x86_64 0:0.43.0-4.1.xcpng8.2 will be updated
---> Package vhd-tool.x86_64 0:0.43.0-11.1.xcpng8.2 will be an update
---> Package wsproxy.x86_64 0:1.12.0-5.xcpng8.2 will be updated
---> Package wsproxy.x86_64 0:1.12.0-12.xcpng8.2 will be an update
---> Package xapi-core.x86_64 0:1.249.26-2.1.xcpng8.2 will be updated
---> Package xapi-core.x86_64 0:1.249.32-2.1.xcpng8.2 will be an update
---> Package xapi-nbd.x86_64 0:1.11.0-3.2.xcpng8.2 will be updated
---> Package xapi-nbd.x86_64 0:1.11.0-10.1.xcpng8.2 will be an update
---> Package xapi-storage.x86_64 0:11.19.0_sxm2-3.xcpng8.2 will be updated
---> Package xapi-storage.x86_64 0:11.19.0_sxm2-10.xcpng8.2 will be an update
---> Package xapi-storage-script.x86_64 0:0.34.1-2.1.xcpng8.2 will be updated
---> Package xapi-storage-script.x86_64 0:0.34.1-9.1.xcpng8.2 will be an update
---> Package xapi-tests.x86_64 0:1.249.26-2.1.xcpng8.2 will be updated
---> Package xapi-tests.x86_64 0:1.249.32-2.1.xcpng8.2 will be an update
---> Package xapi-xe.x86_64 0:1.249.26-2.1.xcpng8.2 will be updated
---> Package xapi-xe.x86_64 0:1.249.32-2.1.xcpng8.2 will be an update
---> Package xcp-networkd.x86_64 0:0.56.2-1.xcpng8.2 will be updated
---> Package xcp-networkd.x86_64 0:0.56.2-8.xcpng8.2 will be an update
---> Package xcp-ng-linstor.noarch 0:1.0-1.xcpng8.2 will be updated
---> Package xcp-ng-linstor.noarch 0:1.1-3.xcpng8.2 will be an update
--> Processing Dependency: sm-linstor for package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch
---> Package xcp-ng-release.x86_64 0:8.2.1-6 will be updated
---> Package xcp-ng-release.x86_64 0:8.2.1-10 will be an update
---> Package xcp-ng-release-config.x86_64 0:8.2.1-6 will be updated
---> Package xcp-ng-release-config.x86_64 0:8.2.1-10 will be an update
---> Package xcp-ng-release-presets.x86_64 0:8.2.1-6 will be updated
---> Package xcp-ng-release-presets.x86_64 0:8.2.1-10 will be an update
---> Package xcp-ng-xapi-plugins.noarch 0:1.7.2-1.0.0.linstor.1.xcpng8.2 will be updated
---> Package xcp-ng-xapi-plugins.noarch 0:1.8.0-1.xcpng8.2 will be an update
---> Package xcp-rrdd.x86_64 0:1.33.0-6.1.xcpng8.2 will be updated
---> Package xcp-rrdd.x86_64 0:1.33.2-7.1.xcpng8.2 will be an update
---> Package xen-dom0-libs.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-dom0-libs.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-dom0-tools.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-dom0-tools.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-hypervisor.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-hypervisor.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-libs.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-libs.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-tools.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-tools.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xenopsd.x86_64 0:0.150.12-1.2.xcpng8.2 will be updated
---> Package xenopsd.x86_64 0:0.150.17-2.1.xcpng8.2 will be an update
---> Package xenopsd-cli.x86_64 0:0.150.12-1.2.xcpng8.2 will be updated
---> Package xenopsd-cli.x86_64 0:0.150.17-2.1.xcpng8.2 will be an update
---> Package xenopsd-xc.x86_64 0:0.150.12-1.2.xcpng8.2 will be updated
---> Package xenopsd-xc.x86_64 0:0.150.17-2.1.xcpng8.2 will be an update
---> Package xs-openssl-libs.x86_64 1:1.1.1k-6.1.xcpng8.2 will be updated
---> Package xs-openssl-libs.x86_64 1:1.1.1k-9.1.xcpng8.2 will be an update
---> Package zabbix-agent.x86_64 0:7.0.0-alpha3.release1.el7 will be updated
---> Package zabbix-agent.x86_64 0:7.0.0-beta1.release1.el7 will be an update
--> Running transaction check
---> Package fuse-libs.x86_64 0:2.9.2-10.xcpng8.2 will be installed
---> Package igc-module.x86_64 0:5.10.200-1.xcpng8.2 will be installed
---> Package mpi3mr-module.x86_64 0:8.6.1.0.0-1.xcpng8.2 will be installed
---> Package r8125-module.x86_64 0:9.012.03-1.xcpng8.2 will be installed
---> Package xcp-ng-linstor.noarch 0:1.1-3.xcpng8.2 will be an update
--> Processing Dependency: sm-linstor for package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch
--> Finished Dependency Resolution
Error: Package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch (xcp-ng-updates)
Requires: sm-linstor
You could try using --skip-broken to work around the problem
You could try running: rpm -Va --nofiles --nodigest
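To chase the missing dependency itself, it may be worth asking yum which repo, if any, is supposed to supply it (a sketch):
yum provides sm-linstor
# and double-check the LINSTOR repo is actually enabled
yum repolist enabled | grep -i linstor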
@stormi Message sent, thanks!
No change if I exclude zabbix:
[13:20 ovbh-pprod-xen10 ~]# yum update --disablerepo=zabbix*
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Excluding mirror: updates.xcp-ng.org
* xcp-ng-base: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-linstor: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-updates: mirrors.xcp-ng.org
Resolving Dependencies
--> Running transaction check
---> Package blktap.x86_64 0:3.37.4-1.0.1.0.linstor.1.xcpng8.2 will be updated
---> Package blktap.x86_64 0:3.37.4-2.1.xcpng8.2 will be an update
---> Package device-mapper-multipath.x86_64 0:0.4.9-119.xs+1.2.xcpng8.2 will be updated
---> Package device-mapper-multipath.x86_64 0:0.4.9-136.xcpng8.2 will be an update
---> Package device-mapper-multipath-libs.x86_64 0:0.4.9-119.xs+1.2.xcpng8.2 will be updated
---> Package device-mapper-multipath-libs.x86_64 0:0.4.9-136.xcpng8.2 will be an update
---> Package e2fsprogs.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package e2fsprogs.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
--> Processing Dependency: libfuse.so.2(FUSE_2.5)(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
--> Processing Dependency: libfuse.so.2(FUSE_2.6)(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
--> Processing Dependency: libfuse.so.2(FUSE_2.8)(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
--> Processing Dependency: libfuse.so.2()(64bit) for package: e2fsprogs-1.47.0-1.1.xcpng8.2.x86_64
---> Package e2fsprogs-libs.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package e2fsprogs-libs.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
---> Package forkexecd.x86_64 0:1.18.1-1.1.xcpng8.2 will be updated
---> Package forkexecd.x86_64 0:1.18.3-3.1.xcpng8.2 will be an update
---> Package gpumon.x86_64 0:0.18.0-4.2.xcpng8.2 will be updated
---> Package gpumon.x86_64 0:0.18.0-11.2.xcpng8.2 will be an update
---> Package grub.x86_64 1:2.02-3.1.0.xcpng8.2 will be updated
---> Package grub.x86_64 1:2.02-3.2.0.xcpng8.2 will be an update
---> Package grub-efi.x86_64 1:2.02-3.1.0.xcpng8.2 will be updated
---> Package grub-efi.x86_64 1:2.02-3.2.0.xcpng8.2 will be an update
---> Package grub-tools.x86_64 1:2.02-3.1.0.xcpng8.2 will be updated
---> Package grub-tools.x86_64 1:2.02-3.2.0.xcpng8.2 will be an update
---> Package guest-templates-json.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package guest-templates-json-data-linux.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json-data-linux.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package guest-templates-json-data-other.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json-data-other.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package guest-templates-json-data-windows.noarch 0:1.9.6-1.2.xcpng8.2 will be updated
---> Package guest-templates-json-data-windows.noarch 0:1.10.6-1.1.xcpng8.2 will be an update
---> Package http-nbd-transfer.x86_64 0:1.2.0-1.xcpng8.2 will be updated
---> Package http-nbd-transfer.x86_64 0:1.3.0-1.xcpng8.2 will be an update
---> Package irqbalance.x86_64 3:1.0.7-11.xcpng8.2 will be updated
---> Package irqbalance.x86_64 3:1.0.7-16.xcpng8.2 will be an update
---> Package kernel.x86_64 0:4.19.19-7.0.15.1.xcpng8.2 will be updated
---> Package kernel.x86_64 0:4.19.19-7.0.23.1.xcpng8.2 will be an update
---> Package kpartx.x86_64 0:0.4.9-119.xs+1.2.xcpng8.2 will be updated
---> Package kpartx.x86_64 0:0.4.9-136.xcpng8.2 will be an update
---> Package libcom_err.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package libcom_err.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
---> Package libss.x86_64 0:1.42.9-12.el7_5 will be updated
---> Package libss.x86_64 0:1.47.0-1.1.xcpng8.2 will be an update
---> Package linux-firmware.noarch 0:20190314-5.1.xcpng8.2 will be updated
---> Package linux-firmware.noarch 0:20190314-10.2.xcpng8.2 will be an update
---> Package lldpad.x86_64 0:1.0.1-3.git036e314.xcpng8.2 will be updated
---> Package lldpad.x86_64 0:1.0.1-10.xcpng8.2 will be an update
---> Package message-switch.x86_64 0:1.23.2-3.2.xcpng8.2 will be updated
---> Package message-switch.x86_64 0:1.23.2-10.1.xcpng8.2 will be an update
---> Package microcode_ctl.x86_64 2:2.1-26.xs23.1.xcpng8.2 will be updated
---> Package microcode_ctl.x86_64 2:2.1-26.xs26.2.xcpng8.2 will be an update
---> Package nbd.x86_64 0:3.14-2.el7 will be updated
---> Package nbd.x86_64 0:3.24-1.xcpng8.2 will be an update
---> Package qemu.x86_64 2:4.2.1-4.6.2.1.xcpng8.2 will be updated
---> Package qemu.x86_64 2:4.2.1-4.6.3.1.xcpng8.2 will be an update
---> Package rrd2csv.x86_64 0:1.2.5-7.1.xcpng8.2 will be updated
---> Package rrd2csv.x86_64 0:1.2.6-8.1.xcpng8.2 will be an update
---> Package rrdd-plugins.x86_64 0:1.10.8-5.1.xcpng8.2 will be updated
---> Package rrdd-plugins.x86_64 0:1.10.9-5.1.xcpng8.2 will be an update
---> Package sm.x86_64 0:2.30.7-1.3.0.linstor.7.xcpng8.2 will be updated
---> Package sm.x86_64 0:2.30.8-7.1.xcpng8.2 will be an update
---> Package sm-cli.x86_64 0:0.23.0-7.xcpng8.2 will be updated
---> Package sm-cli.x86_64 0:0.23.0-54.1.xcpng8.2 will be an update
---> Package sm-rawhba.x86_64 0:2.30.7-1.3.0.linstor.7.xcpng8.2 will be updated
---> Package sm-rawhba.x86_64 0:2.30.8-7.1.xcpng8.2 will be an update
---> Package squeezed.x86_64 0:0.27.0-5.xcpng8.2 will be updated
---> Package squeezed.x86_64 0:0.27.0-11.1.xcpng8.2 will be an update
---> Package tzdata.noarch 0:2022a-1.el7 will be updated
---> Package tzdata.noarch 0:2023c-1.el7 will be an update
---> Package tzdata-java.noarch 0:2022a-1.el7 will be updated
---> Package tzdata-java.noarch 0:2023c-1.el7 will be an update
---> Package varstored-guard.x86_64 0:0.6.2-1.xcpng8.2 will be updated
---> Package varstored-guard.x86_64 0:0.6.2-8.xcpng8.2 will be an update
---> Package vendor-drivers.x86_64 0:1.0.2-1.3.xcpng8.2 will be updated
---> Package vendor-drivers.x86_64 0:1.0.2-1.6.xcpng8.2 will be an update
--> Processing Dependency: mpi3mr-module for package: vendor-drivers-1.0.2-1.6.xcpng8.2.x86_64
--> Processing Dependency: r8125-module for package: vendor-drivers-1.0.2-1.6.xcpng8.2.x86_64
--> Processing Dependency: igc-module for package: vendor-drivers-1.0.2-1.6.xcpng8.2.x86_64
---> Package vhd-tool.x86_64 0:0.43.0-4.1.xcpng8.2 will be updated
---> Package vhd-tool.x86_64 0:0.43.0-11.1.xcpng8.2 will be an update
---> Package wsproxy.x86_64 0:1.12.0-5.xcpng8.2 will be updated
---> Package wsproxy.x86_64 0:1.12.0-12.xcpng8.2 will be an update
---> Package xapi-core.x86_64 0:1.249.26-2.1.xcpng8.2 will be updated
---> Package xapi-core.x86_64 0:1.249.32-2.1.xcpng8.2 will be an update
---> Package xapi-nbd.x86_64 0:1.11.0-3.2.xcpng8.2 will be updated
---> Package xapi-nbd.x86_64 0:1.11.0-10.1.xcpng8.2 will be an update
---> Package xapi-storage.x86_64 0:11.19.0_sxm2-3.xcpng8.2 will be updated
---> Package xapi-storage.x86_64 0:11.19.0_sxm2-10.xcpng8.2 will be an update
---> Package xapi-storage-script.x86_64 0:0.34.1-2.1.xcpng8.2 will be updated
---> Package xapi-storage-script.x86_64 0:0.34.1-9.1.xcpng8.2 will be an update
---> Package xapi-tests.x86_64 0:1.249.26-2.1.xcpng8.2 will be updated
---> Package xapi-tests.x86_64 0:1.249.32-2.1.xcpng8.2 will be an update
---> Package xapi-xe.x86_64 0:1.249.26-2.1.xcpng8.2 will be updated
---> Package xapi-xe.x86_64 0:1.249.32-2.1.xcpng8.2 will be an update
---> Package xcp-networkd.x86_64 0:0.56.2-1.xcpng8.2 will be updated
---> Package xcp-networkd.x86_64 0:0.56.2-8.xcpng8.2 will be an update
---> Package xcp-ng-linstor.noarch 0:1.0-1.xcpng8.2 will be updated
---> Package xcp-ng-linstor.noarch 0:1.1-3.xcpng8.2 will be an update
--> Processing Dependency: sm-linstor for package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch
---> Package xcp-ng-release.x86_64 0:8.2.1-6 will be updated
---> Package xcp-ng-release.x86_64 0:8.2.1-10 will be an update
---> Package xcp-ng-release-config.x86_64 0:8.2.1-6 will be updated
---> Package xcp-ng-release-config.x86_64 0:8.2.1-10 will be an update
---> Package xcp-ng-release-linstor.noarch 0:1.2-1.xcpng8.2 will be updated
---> Package xcp-ng-release-linstor.noarch 0:1.3-1.xcpng8.2 will be an update
---> Package xcp-ng-release-presets.x86_64 0:8.2.1-6 will be updated
---> Package xcp-ng-release-presets.x86_64 0:8.2.1-10 will be an update
---> Package xcp-ng-xapi-plugins.noarch 0:1.7.2-1.0.0.linstor.1.xcpng8.2 will be updated
---> Package xcp-ng-xapi-plugins.noarch 0:1.8.0-1.xcpng8.2 will be an update
---> Package xcp-rrdd.x86_64 0:1.33.0-6.1.xcpng8.2 will be updated
---> Package xcp-rrdd.x86_64 0:1.33.2-7.1.xcpng8.2 will be an update
---> Package xen-dom0-libs.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-dom0-libs.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-dom0-tools.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-dom0-tools.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-hypervisor.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-hypervisor.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-libs.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-libs.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xen-tools.x86_64 0:4.13.5-9.30.3.xcpng8.2 will be updated
---> Package xen-tools.x86_64 0:4.13.5-9.38.3.xcpng8.2 will be an update
---> Package xenopsd.x86_64 0:0.150.12-1.2.xcpng8.2 will be updated
---> Package xenopsd.x86_64 0:0.150.17-2.1.xcpng8.2 will be an update
---> Package xenopsd-cli.x86_64 0:0.150.12-1.2.xcpng8.2 will be updated
---> Package xenopsd-cli.x86_64 0:0.150.17-2.1.xcpng8.2 will be an update
---> Package xenopsd-xc.x86_64 0:0.150.12-1.2.xcpng8.2 will be updated
---> Package xenopsd-xc.x86_64 0:0.150.17-2.1.xcpng8.2 will be an update
---> Package xs-openssl-libs.x86_64 1:1.1.1k-6.1.xcpng8.2 will be updated
---> Package xs-openssl-libs.x86_64 1:1.1.1k-9.1.xcpng8.2 will be an update
--> Running transaction check
---> Package fuse-libs.x86_64 0:2.9.2-10.xcpng8.2 will be installed
---> Package igc-module.x86_64 0:5.10.200-1.xcpng8.2 will be installed
---> Package mpi3mr-module.x86_64 0:8.6.1.0.0-1.xcpng8.2 will be installed
---> Package r8125-module.x86_64 0:9.012.03-1.xcpng8.2 will be installed
---> Package xcp-ng-linstor.noarch 0:1.1-3.xcpng8.2 will be an update
--> Processing Dependency: sm-linstor for package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch
--> Finished Dependency Resolution
Error: Package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch (xcp-ng-updates)
Requires: sm-linstor
You could try using --skip-broken to work around the problem
You could try running: rpm -Va --nofiles --nodigest
@Jonathon said in XOSTOR hyperconvergence preview:
@stormi said in XOSTOR hyperconvergence preview:
yum repolist
lol glad I checked then
# yum repolist
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Excluding mirror: updates.xcp-ng.org
 * xcp-ng-base: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
 * xcp-ng-linstor: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
 * xcp-ng-updates: mirrors.xcp-ng.org
repo id                          repo name                                           status
!xcp-ng-base                     XCP-ng Base Repository                              2,161
!xcp-ng-linstor                  XCP-ng LINSTOR Repository                             142
!xcp-ng-updates                  XCP-ng Updates Repository                           1,408
!zabbix/x86_64                   Zabbix Official Repository - x86_64                    79
!zabbix-non-supported/x86_64     Zabbix Official Repository non-supported - x86_64       6
repolist: 3,796
I was wondering if anyone had any insight into this? Unable to upgrade xen hosts.
@stormi said in XOSTOR hyperconvergence preview:
yum repolist
lol glad I checked then
# yum repolist
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Excluding mirror: updates.xcp-ng.org
* xcp-ng-base: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-linstor: mirrors.xcp-ng.org
Excluding mirror: updates.xcp-ng.org
* xcp-ng-updates: mirrors.xcp-ng.org
repo id repo name status
!xcp-ng-base XCP-ng Base Repository 2,161
!xcp-ng-linstor XCP-ng LINSTOR Repository 142
!xcp-ng-updates XCP-ng Updates Repository 1,408
!zabbix/x86_64 Zabbix Official Repository - x86_64 79
!zabbix-non-supported/x86_64 Zabbix Official Repository non-supported - x86_64 6
repolist: 3,796
Hello!
I am attempting to update our hosts, starting with the pool master, but I am getting a message that I wanted to ask about.
The following happens when I attempt a yum update:
--> Processing Dependency: sm-linstor for package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch
--> Finished Dependency Resolution
Error: Package: xcp-ng-linstor-1.1-3.xcpng8.2.noarch (xcp-ng-updates)
Requires: sm-linstor
You could try using --skip-broken to work around the problem
You could try running: rpm -Va --nofiles --nodigest
Only reference I am finding is here: https://koji.xcp-ng.org/buildinfo?buildID=3044
My best guess is that I need to do two updates, the first one with --skip-broken. But I wanted to ask to be sure, so as not to put things in a weird state.
Thanks in advance!