max pooling and convolution layer torch code example

Example 1: batchnorm1d pytorch

import torch
import torch.nn as nn
import torch.nn.functional as F

class network(nn.Module):
    def __init__(self):
        super(network, self).__init__()
        self.linear1 = nn.Linear(in_features=40, out_features=320)
        self.bn1 = nn.BatchNorm1d(num_features=320)  # normalizes each of the 320 features across the batch
        self.linear2 = nn.Linear(in_features=320, out_features=2)

    def forward(self, input):  # input shape: (batch_size, 40)
        y = F.relu(self.bn1(self.linear1(input)))
        y = F.softmax(self.linear2(y), dim=1)
        return y

model = network()
x = torch.randn(10, 40)  # batch of 10 samples with 40 features each
output = model(x)
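
Note: nn.BatchNorm1d behaves differently in training and evaluation mode (it uses running statistics at eval time), so switch modes before inference. A minimal sketch, reusing the model and x defined above:

model.eval()                        # use running mean/var instead of batch statistics
with torch.no_grad():               # no gradient tracking needed for inference
    preds = model(x).argmax(dim=1)  # predicted class index per sample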

Example 2: torch cnn

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

train_loader = DataLoader(train_data, batch_size=10, shuffle=True)
test_loader = DataLoader(test_data, batch_size=10, shuffle=False)

class ConvolutionalNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 3, 1)   # 1 input channel -> 6 feature maps, 3x3 kernel, stride 1
        self.conv2 = nn.Conv2d(6, 16, 3, 1)  # 6 -> 16 feature maps, 3x3 kernel, stride 1
        self.fc1 = nn.Linear(5*5*16, 100)    # 5x5x16 assumes 28x28 single-channel (MNIST-sized) input
        self.fc2 = nn.Linear(100, 10)

    def forward(self, X):
        X = F.relu(self.conv1(X))
        X = F.max_pool2d(X, 2, 2)   # 26x26 -> 13x13
        X = F.relu(self.conv2(X))
        X = F.max_pool2d(X, 2, 2)   # 11x11 -> 5x5
        X = X.view(-1, 5*5*16)      # flatten for the fully connected layers
        X = F.relu(self.fc1(X))
        X = self.fc2(X)
        return F.log_softmax(X, dim=1)

torch.manual_seed(101)  # seed before instantiation for reproducible weight initialization
model = ConvolutionalNetwork()
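
The pooling in Example 2 is applied with the functional API (F.max_pool2d). The same convolution/max-pooling stack can also be written with explicit layer modules, which keeps the pooling visible in the model definition. A minimal sketch of that variant, assuming the same 28x28 single-channel input (the input size is an assumption, not stated above):

import torch
import torch.nn as nn

model_seq = nn.Sequential(
    nn.Conv2d(1, 6, 3, 1), nn.ReLU(), nn.MaxPool2d(2, 2),
    nn.Conv2d(6, 16, 3, 1), nn.ReLU(), nn.MaxPool2d(2, 2),
    nn.Flatten(),                  # (batch, 16, 5, 5) -> (batch, 400)
    nn.Linear(5*5*16, 100), nn.ReLU(),
    nn.Linear(100, 10),
    nn.LogSoftmax(dim=1),
)

dummy = torch.randn(10, 1, 28, 28)  # hypothetical batch of 28x28 grayscale images
print(model_seq(dummy).shape)       # torch.Size([10, 10])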