Test code block

import time

import torch
from esm3 import MultiNodeESM3Model

# Configuration
world_size = torch.cuda.device_count()
batch_size = 128
model = MultiNodeESM3Model("esm3-large").cuda()

# Dataset and DataLoader
def load_large_dataset():
    # Simulate a large dataset; in practice, sequences must be tokenized
    # into tensors before they can be moved to the GPU.
    return ["Sample sequence"] * batch_size * 10

data_loader = torch.utils.data.DataLoader(load_large_dataset(), batch_size=batch_size)

# Benchmark Function
def multi_gpu_benchmark(model):
    # Replicate the model across all visible GPUs in a single process.
    model = torch.nn.DataParallel(model)
    total_time = 0.0
    total_samples = 0
    for batch in data_loader:
        batch = batch.to("cuda")  # assumes the DataLoader yields tensors
        start_time = time.time()
        with torch.no_grad():
            outputs = model(batch)
        total_time += time.time() - start_time
        total_samples += len(batch)
    throughput = total_samples / total_time
    return throughput

# Execute
throughput = multi_gpu_benchmark(model)
print(f"Throughput: {throughput:.2f} samples/second")
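
torch.nn.DataParallel runs in a single process and scatters every batch from GPU 0, which often limits scaling and cannot span nodes. A common alternative is torch.distributed with DistributedDataParallel: one process per GPU, a DistributedSampler to shard the data, and an all_reduce to combine per-rank throughput. The sketch below is a minimal, hedged adaptation of the benchmark above; it assumes the same MultiNodeESM3Model constructor, load_large_dataset helper, and batch_size, that batches arrive as tensors, and that the script is launched with torchrun (e.g. torchrun --nproc_per_node=8 benchmark_ddp.py).

import os
import time

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def ddp_benchmark():
    # One process per GPU; torchrun sets RANK, LOCAL_RANK and WORLD_SIZE.
    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)

    # Same (assumed) model and dataset as the DataParallel example above.
    model = MultiNodeESM3Model("esm3-large").cuda(local_rank)
    model = DDP(model, device_ids=[local_rank])

    dataset = load_large_dataset()
    sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler)

    total_time, total_samples = 0.0, 0
    for batch in loader:
        batch = batch.to(local_rank)  # assumes tensor batches
        torch.cuda.synchronize()
        start = time.time()
        with torch.no_grad():
            model(batch)
        torch.cuda.synchronize()
        total_time += time.time() - start
        total_samples += len(batch)

    # Sum per-rank throughput to get the aggregate figure.
    throughput = torch.tensor(total_samples / total_time, device=local_rank)
    dist.all_reduce(throughput)
    if dist.get_rank() == 0:
        print(f"Aggregate throughput: {throughput.item():.2f} samples/second")
    dist.destroy_process_group()

if __name__ == "__main__":
    ddp_benchmark()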
