Skip to content

Commit

Permalink
Add test for NIC Passthrough
Browse files Browse the repository at this point in the history
Signed-off-by: Smit Gardhariya <[email protected]>
  • Loading branch information
smit-gardhariya committed Jan 10, 2025
1 parent 4b8bca8 commit 343e6d9
Show file tree
Hide file tree
Showing 9 changed files with 352 additions and 36 deletions.
3 changes: 3 additions & 0 deletions lisa/sut_orchestrator/libvirt/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,9 @@ class NodeContext:
default_factory=list,
)

# Add host detail under node context for device-passthrough testcases
host_node: Any = None


def get_environment_context(environment: Environment) -> EnvironmentContext:
    """Return the ``EnvironmentContext`` object attached to *environment*."""
    return environment.get_context(EnvironmentContext)
Expand Down
72 changes: 61 additions & 11 deletions lisa/sut_orchestrator/libvirt/libvirt_device_pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,24 +186,74 @@ def create_device_pool(
vendor_id=vendor_id,
device_id=device_id,
)
primary_nic_iommu = self.get_primary_nic_id()
for item in device_list:
device = DeviceAddressSchema()
domain, bus, slot, fn = self._parse_pci_address_str(addr=item.slot)
device.domain = domain
device.bus = bus
device.slot = slot
device.function = fn
bdf_list = [i.slot for i in device_list]
self._create_pool(pool_type, bdf_list)

def create_device_pool_from_pci_addresses(
    self,
    pool_type: HostDevicePoolType,
    pci_addr_list: List[str],
) -> None:
    """Build a passthrough device pool from explicit PCI BDF addresses.

    For every given BDF, the full IOMMU group the device belongs to is
    collected from sysfs and handed to ``_create_pool``, since devices in
    the same IOMMU group can only be assigned together.

    Args:
        pool_type: pool the devices are registered under.
        pci_addr_list: PCI addresses in BDF form, e.g. "0000:3b:00.0".
    """
    self.available_host_devices[pool_type] = {}
    # Bug fix: primary_nic_iommu was referenced below without being
    # defined anywhere in this method (NameError at runtime).
    primary_nic_iommu = self.get_primary_nic_id()
    for bdf in pci_addr_list:
        domain, bus, slot, fn = self._parse_pci_address_str(bdf)
        device = self._get_pci_address_instance(domain, bus, slot, fn)
        iommu_group = self._get_device_iommu_group(device)
        is_vfio_pci = self._is_driver_vfio_pci(device)

        # Skip devices already bound to vfio-pci (in use) and devices
        # sharing the primary NIC's IOMMU group.
        if not is_vfio_pci and iommu_group not in primary_nic_iommu:
            # Get all the devices of that iommu group
            iommu_path = f"/sys/kernel/iommu_groups/{iommu_group}/devices"
            bdf_list = [
                i.strip() for i in self.host_node.tools[Ls].list(iommu_path)
            ]
            # The sysfs listing normally already includes the device itself;
            # only append it if it is missing, to avoid duplicates.
            if bdf.strip() not in bdf_list:
                bdf_list.append(bdf.strip())
            # Bug fix: keep this call inside the guard so bdf_list is
            # always bound when it is used.
            self._create_pool(pool_type, bdf_list)

def _create_pool(
    self,
    pool_type: HostDevicePoolType,
    bdf_list: List[str],
) -> None:
    """Populate ``available_host_devices[pool_type]`` grouped by IOMMU group.

    A device already bound to vfio-pci is treated as in use: its whole
    IOMMU group is evicted from the pool and remembered so no sibling
    device from that group gets pooled. Devices in the primary NIC's
    IOMMU group are never added to the pool.

    Args:
        bdf_list: PCI addresses in BDF form to consider for the pool.
    """
    iommu_grp_of_used_devices: List[str] = []
    primary_nic_iommu = self.get_primary_nic_id()
    for bdf in bdf_list:
        domain, bus, slot, fn = self._parse_pci_address_str(bdf)
        dev = self._get_pci_address_instance(domain, bus, slot, fn)
        is_vfio_pci = self._is_driver_vfio_pci(dev)
        iommu_group = self._get_device_iommu_group(dev)

        if iommu_group in iommu_grp_of_used_devices:
            # No need to add this device in pool as one of the devices for
            # this iommu group is in use
            continue

        if is_vfio_pci:
            # Do not consider any device for pool if any device of same
            # iommu group is already assigned: drop the group and remember
            # it so later siblings are skipped by the check above.
            pool = self.available_host_devices.get(pool_type, {})
            pool.pop(iommu_group, [])
            self.available_host_devices[pool_type] = pool
            iommu_grp_of_used_devices.append(iommu_group)
        elif iommu_group not in primary_nic_iommu:
            # Note: the original also re-checked iommu_grp_of_used_devices
            # here, which the `continue` above already guarantees; a stray
            # `devices.append(device)` (undefined name) was removed as well.
            pool = self.available_host_devices.get(pool_type, {})
            devices = pool.get(iommu_group, [])
            if dev not in devices:
                devices.append(dev)
            pool[iommu_group] = devices
            self.available_host_devices[pool_type] = pool

def _get_pci_address_instance(
    self, domain: str, bus: str, slot: str, fn: str,
) -> DeviceAddressSchema:
    """Pack parsed PCI address components into a ``DeviceAddressSchema``."""
    pci_addr = DeviceAddressSchema()
    pci_addr.domain, pci_addr.bus = domain, bus
    pci_addr.slot, pci_addr.function = slot, fn
    return pci_addr

def _add_device_passthrough_xml(
self,
devices: ET.Element,
Expand Down
4 changes: 4 additions & 0 deletions lisa/sut_orchestrator/libvirt/platform.py
Original file line number Diff line number Diff line change
Expand Up @@ -813,6 +813,10 @@ def _fill_nodes_metadata(self, environment: Environment, log: Logger) -> None:
)

node_context = get_node_context(node)
if self.host_node.is_remote:
node_context.host_node = remote_node
else:
node_context.host_node = self.host_node
if node_context.init_system == InitSystem.CLOUD_INIT:
# Ensure cloud-init completes its setup.
node.execute(
Expand Down
10 changes: 10 additions & 0 deletions lisa/sut_orchestrator/libvirt/schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,16 @@ class DeviceAddressSchema:
slot: str = ""
function: str = ""

def __eq__(self, other: object) -> bool:
    """Compare two PCI addresses field by field.

    Returns ``NotImplemented`` for foreign types so Python can try the
    reflected comparison instead of hard-coding inequality.
    """
    if not isinstance(other, DeviceAddressSchema):
        return NotImplemented
    return (self.domain, self.bus, self.slot, self.function) == (
        other.domain,
        other.bus,
        other.slot,
        other.function,
    )

def __hash__(self) -> int:
    # Defining __eq__ alone implicitly sets __hash__ to None (instances
    # become unhashable); keep hashing consistent with equality.
    return hash((self.domain, self.bus, self.slot, self.function))


# QEMU orchestrator's global configuration options.
@dataclass_json()
Expand Down
55 changes: 38 additions & 17 deletions lisa/sut_orchestrator/util/device_pool.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
from typing import Any, List, Optional

from lisa.sut_orchestrator.util.schema import HostDevicePoolSchema, HostDevicePoolType
from lisa.sut_orchestrator.util.schema import (
HostDevicePoolSchema,
HostDevicePoolType,
PciAddressIdentifier,
VendorDeviceIdIdentifier,
)
from lisa.util import LisaException


Expand All @@ -16,6 +21,13 @@ def create_device_pool(
) -> None:
raise NotImplementedError()

def create_device_pool_from_pci_addresses(
    self,
    pool_type: HostDevicePoolType,
    pci_addr_list: List[str],
) -> None:
    """Create a passthrough device pool from raw PCI BDF address strings.

    Platform-specific subclasses must override this; the base class has
    no default implementation.
    """
    raise NotImplementedError()

def get_primary_nic_id(self) -> List[str]:
    """Return identifiers for the host's primary NIC.

    Callers compare device IOMMU groups against this list to keep the
    management NIC out of passthrough pools. Must be overridden.
    """
    raise NotImplementedError()

Expand Down Expand Up @@ -44,22 +56,31 @@ def configure_device_passthrough_pool(
f"Pool type '{pool_type}' is not supported by platform"
)
for config in device_configs:
    device_list = config.devices
    # Robustness: an empty list would vacuously match the vendor/device
    # branch (all() on empty is True) and crash on device_list[0].
    if not device_list:
        raise LisaException(
            f"No devices specified for pool type '{config.type}'"
        )
    if all(isinstance(d, VendorDeviceIdIdentifier) for d in device_list):
        if len(device_list) > 1:
            raise LisaException(
                "Device Pool does not support more than one "
                "vendor/device id list for given pool type"
            )

        vendor_device_id = device_list[0]
        assert vendor_device_id.vendor_id.strip()
        vendor_id = vendor_device_id.vendor_id.strip()

        assert vendor_device_id.device_id.strip()
        device_id = vendor_device_id.device_id.strip()

        self.create_device_pool(
            pool_type=config.type,
            vendor_id=vendor_id,
            device_id=device_id,
        )
    elif all(isinstance(d, PciAddressIdentifier) for d in device_list):
        # Create pool from the list of PCI addresses.
        # Bug fix: pass the BDF strings, not the identifier objects —
        # create_device_pool_from_pci_addresses expects List[str].
        self.create_device_pool_from_pci_addresses(
            pool_type=config.type,
            pci_addr_list=[d.pci_bdf for d in device_list],
        )
    else:
        # Mixed or unrecognized identifier types are rejected.
        raise LisaException("Unknown device identifier")
15 changes: 12 additions & 3 deletions lisa/sut_orchestrator/util/schema.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from dataclasses import dataclass, field
from enum import Enum
from typing import List
from typing import List, Union

from dataclasses_json import dataclass_json

Expand All @@ -12,17 +12,26 @@ class HostDevicePoolType(Enum):

@dataclass_json()
@dataclass
class VendorDeviceIdIdentifier:
    # Identifies passthrough devices by their PCI vendor id / device id
    # pair. (Stray leftover `class DeviceIdentifier:` line from the old
    # name removed — two consecutive class statements are a syntax error.)
    vendor_id: str = ""
    device_id: str = ""


@dataclass_json()
@dataclass
class PciAddressIdentifier:
    # Full PCI address in BDF form <domain>:<bus>:<slot>.<fn>,
    # e.g. "0000:3b:00.0".
    pci_bdf: str = ""


# Configuration options for device-passthrough for the VM.
@dataclass_json()
@dataclass
class HostDevicePoolSchema:
    # Kind of device this pool provides; defaults to NIC passthrough.
    type: HostDevicePoolType = HostDevicePoolType.PCI_NIC
    # Devices are identified either by vendor/device id pairs or by raw
    # PCI BDF addresses; consumers reject mixed identifier styles.
    # (Stray leftover `devices: List[DeviceIdentifier]` line referencing
    # the renamed class removed.)
    devices: Union[
        List[VendorDeviceIdIdentifier], List[PciAddressIdentifier]
    ] = field(default_factory=list)


@dataclass_json()
Expand Down
3 changes: 3 additions & 0 deletions lisa/tools/iperf3.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,7 @@ def run_as_server_async(
use_json_format: bool = False,
one_connection_only: bool = False,
daemon: bool = True,
interface_ip: str = "",
) -> Process:
# -s: run iperf3 as server mode
# -D: run iperf3 as a daemon
Expand All @@ -135,6 +136,8 @@ def run_as_server_async(
cmd += f" -f {report_unit} "
if port:
cmd += f" -p {port} "
if interface_ip:
cmd += f" -B {interface_ip}"
process = self.node.execute_async(
f"{self.command} {cmd}", shell=True, sudo=True
)
Expand Down
30 changes: 27 additions & 3 deletions microsoft/testsuites/performance/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -446,17 +446,35 @@ def perf_iperf(
connections: List[int],
buffer_length_list: List[int],
udp_mode: bool = False,
server: Optional[RemoteNode] = None,
client: Optional[RemoteNode] = None,
run_server_on_internal_address: bool = False,
) -> None:
environment = test_result.environment
assert environment, "fail to get environment from testresult"

client = cast(RemoteNode, environment.nodes[0])
server = cast(RemoteNode, environment.nodes[1])
if server is not None or client is not None:
assert server is not None, "server need to be specified, if client is set"
assert client is not None, "client need to be specified, if server is set"
else:
environment = test_result.environment
assert environment, "fail to get environment from testresult"
# set server and client from environment, if not set explicitly
client = cast(RemoteNode, environment.nodes[0])
server = cast(RemoteNode, environment.nodes[1])

# Ensure that both server and client are non-None before accessing tools
assert client is not None, "client is None, cannot access tools"
assert server is not None, "server is None, cannot access tools"

client_iperf3, server_iperf3 = run_in_parallel(
[lambda: client.tools[Iperf3], lambda: server.tools[Iperf3]]
)
test_case_name = inspect.stack()[1][3]
iperf3_messages_list: List[Any] = []
server_interface_ip = ""
if run_server_on_internal_address:
server_interface_ip = server.internal_address
if udp_mode:
for node in [client, server]:
ssh = node.tools[Ssh]
Expand All @@ -481,7 +499,13 @@ def perf_iperf(
current_server_iperf_instances += 1
server_iperf3_process_list.append(
server_iperf3.run_as_server_async(
current_server_port, "g", 10, True, True, False
port=current_server_port,
report_unit="g",
report_periodic=10,
use_json_format=True,
one_connection_only=True,
daemon=False,
interface_ip=server_interface_ip,
)
)
current_server_port += 1
Expand Down
Loading

0 comments on commit 343e6d9

Please sign in to comment.