import os

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
# Initialize the default process group once per process (NCCL backend for
# multi-GPU training). Rank / world size are provided by the launcher
# (torchrun sets RANK and WORLD_SIZE); fall back to single-process defaults
# for a plain local run.
rank = int(os.environ.get("RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))
if not dist.is_initialized():
    dist.init_process_group(
        backend='nccl',
        init_method='tcp://localhost:23456',
        rank=rank,
        world_size=world_size,
    )

# Load the model and wrap it in `DistributedDataParallel`.
# DDP with device_ids=[rank] requires the module's parameters to already
# live on that single CUDA device, so move the model there first.
torch.cuda.set_device(rank)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(rank)
model = nn.parallel.DistributedDataParallel(model, device_ids=[rank])