A list of timeseries neural architectures
ts = torch.rand(2, 1, 20) # batch of 2 items with 1 channel and length 20

Same as AdaptiveConcatPool2d but on 1D

class AdaptiveConcatPool1d[source]

AdaptiveConcatPool1d(size=None) :: Module

Layer that concats AdaptiveAvgPool1d and AdaptiveMaxPool1d

Multi Layer Perceptron (Linear layered model)

A simple model builder to create a bunch of BatchNorm1d, Dropout and Linear layers, with act_fn activations.

create_mlp[source]

create_mlp(ni, nout, linear_sizes=[500, 500, 500])

mlp = create_mlp(1*20, 37)
mlp
Sequential(
  (0): Flatten(full=False)
  (1): BatchNorm1d(20, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): Dropout(p=0.2, inplace=False)
  (3): Linear(in_features=20, out_features=500, bias=False)
  (4): ReLU(inplace=True)
  (5): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (6): Dropout(p=0.2, inplace=False)
  (7): Linear(in_features=500, out_features=500, bias=False)
  (8): ReLU(inplace=True)
  (9): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (10): Dropout(p=0.2, inplace=False)
  (11): Linear(in_features=500, out_features=500, bias=False)
  (12): ReLU(inplace=True)
  (13): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (14): Dropout(p=0.2, inplace=False)
  (15): Linear(in_features=500, out_features=37, bias=False)
)
mlp(ts).shape
torch.Size([2, 37])

Fully Convolutional Network (FCN).

A bunch of convolutions stacked together.

create_fcn[source]

create_fcn(ni, nout, ks=9, conv_sizes=[128, 256, 128], stride=1)

fcn = create_fcn(1, 37)
fcn
Sequential(
  (0): ConvLayer(
    (0): Conv1d(1, 128, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (1): ConvLayer(
    (0): Conv1d(128, 256, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
    (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (2): ConvLayer(
    (0): Conv1d(256, 128, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (3): AdaptiveConcatPool1d(
    (ap): AdaptiveAvgPool1d(output_size=1)
    (mp): AdaptiveMaxPool1d(output_size=1)
  )
  (4): Flatten(full=False)
  (5): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (6): Linear(in_features=256, out_features=37, bias=False)
)
fcn(ts).shape
torch.Size([2, 37])

Resnet

res_block_1d[source]

res_block_1d(nf, ks=[5, 3])

Resnet block as described in the paper.

resb = res_block_1d(16)
resb
SequentialEx(
  (layers): ModuleList(
    (0): ConvLayer(
      (0): Conv1d(16, 16, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
      (1): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU()
    )
    (1): ConvLayer(
      (0): Conv1d(16, 16, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
      (1): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (2): MergeLayer()
  )
)
resb(torch.rand(2, 16, 100)).shape
torch.Size([2, 16, 100])

create_resnet[source]

create_resnet(ni, nout, kss=[9, 5, 3], conv_sizes=[64, 128, 128], stride=1)

Basic 11 Layer - 1D resnet builder

resnet = create_resnet(1, 37)
resnet
Sequential(
  (0): ConvLayer(
    (0): Conv1d(1, 64, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
    (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (1): SequentialEx(
    (layers): ModuleList(
      (0): ConvLayer(
        (0): Conv1d(64, 64, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
        (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (2): ReLU()
      )
      (1): ConvLayer(
        (0): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
        (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
      (2): MergeLayer()
    )
  )
  (2): ConvLayer(
    (0): Conv1d(64, 128, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (3): SequentialEx(
    (layers): ModuleList(
      (0): ConvLayer(
        (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (2): ReLU()
      )
      (1): ConvLayer(
        (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
      (2): MergeLayer()
    )
  )
  (4): ConvLayer(
    (0): Conv1d(128, 128, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (5): SequentialEx(
    (layers): ModuleList(
      (0): ConvLayer(
        (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (2): ReLU()
      )
      (1): ConvLayer(
        (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
      (2): MergeLayer()
    )
  )
  (6): AdaptiveConcatPool1d(
    (ap): AdaptiveAvgPool1d(output_size=1)
    (mp): AdaptiveMaxPool1d(output_size=1)
  )
  (7): Flatten(full=False)
  (8): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (9): Dropout(p=0.1, inplace=False)
  (10): Linear(in_features=256, out_features=37, bias=False)
)
resnet(ts).shape
torch.Size([2, 37])

Inception Time

InceptionTime: Finding AlexNet for Time Series Classification. The original paper repo is here.

This module is to be used within a SequentialEx block; it accesses the orig attribute to create the shortcut.

class Shortcut[source]

Shortcut(ni, nf, act_fn=ReLU(inplace=True)) :: Module

Merge a shortcut with the result of the module by adding them. Adds Conv, BN and ReLU

The actual inception module as described on the paper.

conv[source]

conv(ni, nf, ks=3, stride=1, bias=False)

class InceptionModule[source]

InceptionModule(ni, nb_filters=32, kss=[39, 19, 9], bottleneck_size=32, stride=1) :: Module

The inception Module from ni inputs to len(kss)*nb_filters+bottleneck_size outputs.

InceptionModule(64, nb_filters=32): will create a 64 channel input module to 3 x 32 channel kernels stacked together with a 32 bottleneck, to form a 128 channel output.

im = InceptionModule(8, nb_filters=32)
im
InceptionModule(
  (bottleneck): Conv1d(8, 32, kernel_size=(1,), stride=(1,), bias=False)
  (convs): ModuleList(
    (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
    (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
    (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
  )
  (conv_bottle): Sequential(
    (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
    (1): Conv1d(8, 32, kernel_size=(1,), stride=(1,), bias=False)
  )
  (bn_relu): Sequential(
    (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): ReLU()
  )
)
im(torch.rand(2, 8, 100)).shape
torch.Size([2, 128, 100])

Creating InceptionTime:

  • ni: number of input channels
  • nout: number of outputs, should be equal to the number of classes for classification tasks.
  • kss: kernel sizes for the inception Block.
  • bottleneck_size: The number of channels on the convolution bottleneck.
  • nb_filters: Channels on the convolution of each kernel.
  • head: True if we want a head attached.

create_inception[source]

create_inception(ni, nout, kss=[39, 19, 9], depth=6, bottleneck_size=32, nb_filters=32, head=True)

Creates an InceptionTime arch from ni channels to nout outputs.

inception = create_inception(1, 37)
inception
Sequential(
  (0): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (convs): ModuleList(
          (0): Conv1d(1, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(1, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(1, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (conv_bottle): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(1, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (1): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (conv_bottle): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (2): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (conv_bottle): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
      (1): Shortcut(
        (act_fn): ReLU(inplace=True)
        (conv): ConvLayer(
          (0): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
          (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (2): ReLU()
        )
        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
  )
  (3): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (conv_bottle): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (4): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (conv_bottle): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
    )
  )
  (5): SequentialEx(
    (layers): ModuleList(
      (0): InceptionModule(
        (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        (convs): ModuleList(
          (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
          (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
          (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
        )
        (conv_bottle): Sequential(
          (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
          (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
        )
        (bn_relu): Sequential(
          (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (1): ReLU()
        )
      )
      (1): Shortcut(
        (act_fn): ReLU(inplace=True)
        (conv): ConvLayer(
          (0): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
          (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (2): ReLU()
        )
        (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
  )
  (6): AdaptiveConcatPool1d(
    (ap): AdaptiveAvgPool1d(output_size=1)
    (mp): AdaptiveMaxPool1d(output_size=1)
  )
  (7): Flatten(full=False)
  (8): Linear(in_features=256, out_features=37, bias=True)
)
inception(ts).shape
torch.Size([2, 37])