test_generator_collectively.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time

import numpy
import pytest
import torch

from draugr import batched_recycle
from draugr.torch_utilities import to_tensor_generator

__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 28/10/2019
"""


@pytest.mark.skip
def test_d1():
    """Compare throughput of to_tensor_generator with and without preloading the next batch."""
    channels_in = 3
    channels_out = 3
    samples = 4
    device = "cuda"
    batches = 10
    batch_size = 32
    data_shape = (batches * batch_size, channels_in, 512, 512)

    # Small convolutional stack used as a stand-in GPU workload
    model = torch.nn.Sequential(
        torch.nn.Conv2d(channels_in, channels_out, (3, 3)),
        torch.nn.ReLU(),
        torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
        torch.nn.ReLU(),
        torch.nn.Conv2d(channels_out, channels_out, (3, 3)),
        torch.nn.ReLU(),
    ).to(device)

    for _ in range(samples):
        s1 = time.time()
        # Preloading disabled: each batch is converted and moved to the device on demand
        for _, a in zip(
            range(batches),
            to_tensor_generator(
                batched_recycle(numpy.random.sample(data_shape), batch_size),
                device=device,
                preload_next=False,
            ),
        ):
            model(a)

        s2 = time.time()
        # Generator defaults: preload_next is not disabled, for comparison against the loop above
        for _, a in zip(
            range(batches),
            to_tensor_generator(
                batched_recycle(numpy.random.sample(data_shape), batch_size),
                device=device,
            ),
        ):
            model(a)

        s3 = time.time()
        print(s2 - s1)  # elapsed time without preloading
        print(s3 - s2)  # elapsed time with default settings
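

if __name__ == "__main__":
    # Convenience entry point (an assumption, not part of the original file): since the test is
    # decorated with @pytest.mark.skip, running this module directly on a CUDA-capable machine
    # is one way to execute the timing comparison and inspect the printed durations.
    test_d1()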