# num_workers.py (1.6 KB)
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. __author__ = "Christian Heider Nielsen"
  4. __doc__ = r"""
  5. Created on 08-12-2020
  6. """
  7. import time
  8. import numpy
  9. import torch
  10. from draugr import WorkerSession, batched_recycle
  11. from draugr.torch_utilities import to_tensor_generator
  12. def test_d3():
  13. channels_in = 3
  14. channels_out = 3
  15. samples = 10
  16. device = "cuda"
  17. batches = 3
  18. batch_size = 32
  19. data_shape = (batches * batch_size, channels_in, 512, 512)
  20. model = torch.nn.Sequential(
  21. torch.nn.Conv2d(channels_in, channels_out, (3, 3)),
  22. torch.nn.ReLU(),
  23. torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
  24. torch.nn.ReLU(),
  25. torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
  26. torch.nn.ReLU(),
  27. ).to(device)
  28. generator = to_tensor_generator(
  29. batched_recycle(numpy.random.sample(data_shape), batch_size), device=device
  30. )
  31. with WorkerSession(0.3) as num_workers:
  32. dataloader = torch.utils.data.DataLoader(
  33. numpy.random.sample(data_shape),
  34. batch_size=batch_size,
  35. shuffle=True,
  36. num_workers=num_workers,
  37. pin_memory=True,
  38. )
  39. for _ in range(samples):
  40. s1 = time.time()
  41. for _, a in zip(range(batches), dataloader):
  42. model(a.to(device, dtype=torch.float))
  43. s2 = time.time()
  44. for _, a in zip(range(batches), generator):
  45. model(a)
  46. s3 = time.time()
  47. print(f"dataloader: {s2 - s1}")
  48. print(f"generator: {s3 - s2}")
  49. if __name__ == "__main__":
  50. test_d3()