Module harvester_e2e_tests.integrations.test_3_vm

Functions

def available_node_names(api_client)
def cluster_network(api_client, vlan_nic)
def minimal_vm(api_client, unique_name, ubuntu_image, ssh_keypair, vm_checker)
def storage_network(api_client, cluster_network, vm_network, setting_checker)
def test_migrate_vm_with_multiple_volumes(api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker)
def test_migrate_vm_with_user_data(api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker)
def test_multiple_migrations(api_client, unique_name, ubuntu_image, wait_timeout, available_node_names)
def ubuntu_image(api_client, unique_name, image_ubuntu, image_checker)
def vm_network(api_client, unique_name, cluster_network, vlan_id, network_checker)

Classes

class TestVMWithStorageNetwork
Expand source code
@pytest.mark.p0
@pytest.mark.networks
@pytest.mark.settings
@pytest.mark.virtualmachines
@pytest.mark.skip_version_if("< v1.0.3")
class TestVMWithStorageNetwork:
    """Verify the storage-network setting can only be enabled when no VM runs.

    Both tests share the same flow: attempt to enable storage-network while a
    VM is running (the API must reject it with 422), stop the VM (once via the
    Harvester API, once from inside the guest via CLI), wait for all of the
    VM's volumes to detach, then enable storage-network and confirm both
    Harvester and Longhorn picked it up.
    """

    def _wait_vm_volumes_detached(self, api_client, volume_checker, vm_name):
        """Wait until every volume attached to *vm_name* is detached.

        Reads the VM spec to collect its PVC claim names, then polls through
        ``volume_checker``; fails the test on timeout.
        """
        code, data = api_client.vms.get(vm_name)
        spec = api_client.vms.Spec.from_dict(data)
        vol_names = [vol['volume']['persistentVolumeClaim']['claimName'] for vol in spec.volumes]
        vm_volumes_detached, (code, data) = volume_checker.wait_volumes_detached(vol_names)
        assert vm_volumes_detached, (code, data)

    def _enable_storage_network(self, api_client, storage_network, setting_checker):
        """Enable storage-network and wait until Harvester and Longhorn report it active."""
        code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
        assert 200 == code, (code, data)
        snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
        assert snet_enabled, (code, data)
        snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(
            storage_network.cidr
        )
        assert snet_enabled, (code, data)

    def test_enable_storage_network_with_api_stopped_vm(
        self, api_client, minimal_vm, storage_network, setting_checker, vm_checker, volume_checker
    ):
        '''
        Steps:
          1. Have at least one Running VM
          2. Enable storage-network (should fail)
          3. Stop all VMs via API
          4. Enable storage-network
        '''
        # With a VM still running, enabling storage-network must be rejected (422).
        code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
        assert 422 == code, (
            f"storage-network should NOT be enabled with running VM: {code}, {data}"
        )

        # stop VM by API
        vm_stopped, (code, data) = vm_checker.wait_status_stopped(minimal_vm.name)
        assert vm_stopped, (code, data)

        # Volumes must fully detach before storage-network can be enabled.
        self._wait_vm_volumes_detached(api_client, volume_checker, minimal_vm.name)

        # enable storage-network
        self._enable_storage_network(api_client, storage_network, setting_checker)

    def test_enable_storage_network_with_cli_stopped_vm(
        self, api_client, ssh_keypair, minimal_vm, storage_network, setting_checker,
        vm_shell_from_host, wait_timeout, volume_checker
    ):
        ''' Refer to https://github.com/harvester/tests/issues/1022
        Steps:
          1. Have at least one Running VM
          2. Enable storage-network (should fail)
          3. Stop all VMs via VM CLI
          4. Enable storage-network
        '''
        # With a VM still running, enabling storage-network must be rejected (422).
        code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
        assert 422 == code, (
            f"storage-network should NOT be enabled with running VM: {code}, {data}"
        )

        # stop VM by CLI (in-guest shutdown, per harvester/tests#1022)
        with vm_shell_from_host(
            minimal_vm.host_ip, minimal_vm.vm_ip, minimal_vm.ssh_user, pkey=ssh_keypair[1]
        ) as sh:
            sh.exec_command('sudo shutdown now')

        # Poll until the VM reports Stopped; while/else raises on timeout.
        endtime = datetime.now() + timedelta(seconds=wait_timeout)
        while endtime > datetime.now():
            code, data = api_client.vms.get(minimal_vm.name)
            if 200 == code and "Stopped" == data.get('status', {}).get('printableStatus'):
                break
            sleep(3)
        else:
            raise AssertionError(
                f"Fail to shutdown VM {minimal_vm.name} with error: {code}, {data}"
            )

        # Volumes must fully detach before storage-network can be enabled.
        self._wait_vm_volumes_detached(api_client, volume_checker, minimal_vm.name)

        # enable storage-network
        self._enable_storage_network(api_client, storage_network, setting_checker)

Class variables

var pytestmark

Methods

def test_enable_storage_network_with_api_stopped_vm(self, api_client, minimal_vm, storage_network, setting_checker, vm_checker, volume_checker)

Steps

  1. Have at least one Running VM
  2. Enable storage-network (should fail)
  3. Stop all VMs via API
  4. Enable storage-network
def test_enable_storage_network_with_cli_stopped_vm(self, api_client, ssh_keypair, minimal_vm, storage_network, setting_checker, vm_shell_from_host, wait_timeout, volume_checker)

Refer to https://github.com/harvester/tests/issues/1022

Steps

  1. Have at least one Running VM
  2. Enable storage-network (should fail)
  3. Stop all VMs via VM CLI
  4. Enable storage-network