All the basic layers used in keratorch.

class __inputDimError__[source]

__inputDimError__() :: Exception

Raised when the first layer of a model is missing an input shape.

class Layer:
    def __init__(self, input_shape=None):
        self.input_shape = input_shape

    def __set_io_shape__(self, input_shape):
        # Resolve the input shape from the constructor or the caller;
        # the first layer of a model must specify it explicitly.
        if input_shape is None and self.input_shape is None:
            raise __inputDimError__("Need to specify input shape in first layer")
        elif input_shape:
            self.input_shape = input_shape
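Every subclass follows the same get_layer convention: it returns a dict with an output_dim and a list of torch modules. A minimal sketch of a hypothetical subclass (IdentityLayer is not part of keratorch) showing how __set_io_shape__ is used:

import torch.nn as nn

class IdentityLayer(Layer):
    # Hypothetical pass-through layer, for illustration only.
    def get_layer(self, input_shape=None):
        # Take the shape from the constructor or the caller;
        # raises __inputDimError__ if neither was given.
        self.__set_io_shape__(input_shape)
        return {'output_dim': self.input_shape, 'layers': [nn.Identity()]}

IdentityLayer().get_layer(4)
{'output_dim': 4, 'layers': [Identity()]}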

Dense

Linear layer that maps input_dim input features to units output features.

class Dense[source]

Dense(units, input_dim=None, activation=None, use_bias=True, kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None)

Dense(3, activation='mish').get_layer(5)
{'output_dim': 3,
 'layers': [Linear(in_features=5, out_features=3, bias=True), Mish()]}
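The returned 'layers' list can be dropped straight into an nn.Sequential; a quick sketch:

import torch
import torch.nn as nn

spec = Dense(3, activation='mish').get_layer(5)
model = nn.Sequential(*spec['layers'])  # Linear(5, 3) followed by Mish
model(torch.zeros(2, 5)).shape
torch.Size([2, 3])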

Conv2D

class Conv2D[source]

Conv2D(filters:int, kernel_size:int=3, strides:int=1, padding:int=None, activation:str=None, use_bias:bool=True, input_shape:tuple=None)

import torch

out_layer = Conv2D(5, activation='Relu', input_shape=(1, 10, 10)).get_layer()
print(out_layer)
conv_layer = out_layer['layers'][0]
conv_layer(torch.zeros((1, 1, 10, 10))).shape
{'output_dim': torch.Size([5, 10, 10]), 'layers': [Conv2d(1, 5, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), ReLU(inplace=True)]}
torch.Size([1, 5, 10, 10])
Alternatively, the input shape can be passed to get_layer instead of the constructor:

out_layer = Conv2D(5, activation='Relu').get_layer((1, 10, 10))
print(out_layer)
conv_layer = out_layer['layers'][0]
conv_layer(torch.zeros((1, 1, 10, 10))).shape
{'output_dim': torch.Size([5, 10, 10]), 'layers': [Conv2d(1, 5, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), ReLU(inplace=True)]}
torch.Size([1, 5, 10, 10])
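The spatial part of output_dim follows standard convolution arithmetic. A quick check of the 10×10 case above, assuming padding=None defaults to kernel_size // 2 (which the preserved 10×10 output and padding=(1, 1) suggest):

def conv_out(size, kernel=3, stride=1, pad=1):
    # floor((size + 2*pad - kernel) / stride) + 1
    return (size + 2 * pad - kernel) // stride + 1

conv_out(10)
10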

Max Pool

class MaxPool2D[source]

MaxPool2D(kernel_size:int=2, strides:int=None, padding:int=0, input_shape:tuple=None)

out_layer = MaxPool2D(2, input_shape=(1, 10, 10)).get_layer()
print(out_layer)
maxpool_layer = out_layer['layers'][0]
maxpool_layer(torch.zeros((1, 1, 10, 10))).shape
{'output_dim': torch.Size([1, 5, 5]), 'layers': [MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)]}
torch.Size([1, 1, 5, 5])
As with Conv2D, the input shape can instead be passed to get_layer:

out_layer = MaxPool2D(2).get_layer((1, 10, 10))
print(out_layer)
maxpool_layer = out_layer['layers'][0]
maxpool_layer(torch.zeros((1, 1, 10, 10))).shape
{'output_dim': torch.Size([1, 5, 5]), 'layers': [MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)]}
torch.Size([1, 1, 5, 5])
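Pooling follows the same arithmetic, with PyTorch defaulting the stride to kernel_size when strides is None; a sketch of the 10 → 5 halving above:

def pool_out(size, kernel=2, stride=None, pad=0):
    stride = stride or kernel  # PyTorch's MaxPool2d default
    return (size + 2 * pad - kernel) // stride + 1

pool_out(10)
5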

Flatten

class Flatten[source]

Flatten(full:bool=False) :: Module

Flattens x to a single dimension, often used at the end of a model. Set full=True to flatten to a rank-1 tensor.

flatten = Flatten((5, 3))
flatten.get_layer()
{'output_dim': 15, 'layers': [Flatten()]}
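Here output_dim is the product of the input dimensions (5 * 3 = 15). The underlying module behaves like torch's flatten; a sketch of both modes, assuming full=True flattens across the batch dimension as well:

import torch

x = torch.zeros(2, 5, 3)
torch.flatten(x, start_dim=1).shape  # default: keep the batch dimension
torch.Size([2, 15])
torch.flatten(x).shape               # full=True: a single rank-1 tensor
torch.Size([30])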

Activation

class Activation[source]

Activation(activation, input_shape=None)

Activation('softmax').get_layer()
{'output_dim': None, 'layers': [Softmax(dim=-1)]}
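A minimal sketch of how an activation name might resolve to a torch module; the table below is illustrative only, keratorch's own lookup may differ:

import torch.nn as nn

# Hypothetical name-to-module table, for illustration only.
ACTIVATIONS = {
    'relu': lambda: nn.ReLU(inplace=True),
    'softmax': lambda: nn.Softmax(dim=-1),
}

ACTIVATIONS['softmax']()
Softmax(dim=-1)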