#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Smoke tests for draugr's MLP: single-, hidden- and multi-dimensional
# input/output configurations.

import numpy
import pytest
import torch
from torch import nn

from draugr.torch_utilities import MLP, constant_init, to_tensor

__author__ = "Christian Heider Nielsen"
__doc__ = ""
def test_single_dim():
    """Forward a batch of flat 4-d observations through a default MLP."""
    input_shape = (4,)
    output_shape = (1,)
    mlp = MLP(input_shape=input_shape, output_shape=output_shape)
    batch = to_tensor(
        numpy.random.rand(64, input_shape[0]), device="cpu", dtype=torch.float
    )
    print(mlp(batch))
def test_hidden_dim():
    """Forward a batch through an MLP with explicit hidden layer sizes."""
    input_shape = (4,)
    hidden_shape = (2, 3)
    output_shape = (2,)
    mlp = MLP(
        input_shape=input_shape,
        hidden_layers=hidden_shape,
        output_shape=output_shape,
    )
    batch = to_tensor(
        numpy.random.rand(64, input_shape[0]), device="cpu", dtype=torch.float
    )
    print(mlp(batch))
@pytest.mark.skip
def test_multi_dim():
    """Forward two flattened multi-dimensional observations.

    TODO: BROKEN!
    """
    input_shape = (2, 3, 2)  # two, 2d tensors, expected flatten
    output_shape = (2, 4, 5)
    mlp = MLP(input_shape=input_shape, output_shape=output_shape)
    flat_size = numpy.prod(input_shape[1:])
    sample_a = to_tensor(
        numpy.random.rand(64, flat_size), device="cpu", dtype=torch.float
    )
    sample_b = to_tensor(
        numpy.random.rand(64, flat_size), device="cpu", dtype=torch.float
    )
    print(mlp(sample_a, sample_b))
def test_single_dim2():
    """Forward a flat batch and inspect the shape of the first output head."""
    input_shape = (4,)
    output_shape = (1,)
    mlp = MLP(input_shape=input_shape, output_shape=output_shape)
    batch = to_tensor(
        numpy.random.rand(64, input_shape[0]), device="cpu", dtype=torch.float
    )
    print(mlp(batch)[0].shape)
def test_hidden_dim2():
    """Compare an MLP against hand-built reference stacks under constant init.

    Builds an MLP with hidden layers (6, 7, 8, 9) and Tanh activations, plus
    two structurally identical hand-written ``nn.Sequential`` references,
    initialises every module to the same constant, and prints the output
    shapes for a common random batch.
    """

    def _reference_stack() -> nn.Sequential:
        # Mirrors MLP(input 3, hidden (6, 7, 8, 9), output 4) with Tanh
        # between layers and no activation after the final Linear.
        widths = (3, 6, 7, 8, 9, 4)
        layers = []
        for in_features, out_features in zip(widths[:-1], widths[1:]):
            layers += [nn.Linear(in_features, out_features), nn.Tanh()]
        return nn.Sequential(*layers[:-1])  # drop trailing activation

    pos_size = (3,)
    hidden_size = list(range(6, 10))
    a_size = (4,)
    model = MLP(
        input_shape=pos_size,
        hidden_layers=hidden_size,
        output_shape=a_size,
        hidden_layer_activation=torch.nn.Tanh(),
        default_init=None,
    )
    model2 = _reference_stack()
    model3 = _reference_stack()
    for module in (model, model2, model3):
        constant_init(module, 0.142)
    print(model, model2, model3)
    pos_1 = to_tensor(
        numpy.random.rand(64, pos_size[0]), device="cpu", dtype=torch.float
    )
    print(model(pos_1)[0].shape)
    print(model2(pos_1).shape)
    print(model3(pos_1).shape)
def test_multi_dim_in():
    """Feed three separate input branches into a multi-input MLP."""
    input_shape = (2, 3, 2)
    output_shape = (2, 4, 5)
    mlp = MLP(input_shape=input_shape, output_shape=output_shape)
    branches = tuple(
        to_tensor(numpy.random.rand(64, width), device="cpu", dtype=torch.float)
        for width in input_shape
    )
    for head in mlp(*branches):
        print(head.shape)
def test_multi_dim_out():
    """A single flat input fanned out to two output heads."""
    input_shape = (10,)
    output_shape = (2, 1)
    mlp = MLP(
        input_shape=input_shape, hidden_layers=(100,), output_shape=output_shape
    )
    batch = to_tensor(
        numpy.random.rand(64, *input_shape), device="cpu", dtype=torch.float
    )
    heads = mlp(batch)
    print(mlp)
    print(len(heads), heads[0].shape, heads[1].shape)
def test_multi_dim_both():
    """Two input branches mapped to three output heads."""
    input_shape = (2, 3)
    output_shape = (2, 4, 5)
    mlp = MLP(input_shape=input_shape, output_shape=output_shape)
    branch_a, branch_b = (
        to_tensor(numpy.random.rand(64, width), device="cpu", dtype=torch.float)
        for width in input_shape
    )
    heads = mlp(branch_a, branch_b)
    print(mlp)
    print(len(heads), heads[0].shape, heads[1].shape, heads[2].shape)
def test_auto():
    """Default MLP construction: single input, single output head."""
    input_shape = (4,)
    output_shape = (2,)
    mlp = MLP(input_shape=input_shape, output_shape=output_shape)
    batch = to_tensor(
        numpy.random.rand(64, input_shape[0]), device="cpu", dtype=torch.float
    )
    heads = mlp(batch)
    print(mlp)
    print(len(heads), heads[0].shape)
if __name__ == "__main__":
    # Run a couple of the smoke tests directly when invoked as a script.
    test_single_dim()
    test_hidden_dim()
    # test_multi_dim()  # skipped: marked broken above
|