Autoencoder with nn.Sequential

I wrote this code to implement an autoencoder with the nn.Sequential module, but I get an error:

latent_dims=4

class Encoder(nn.Module):

    def __init__(self):
        super().__init__()

        #input Nx1x28x28
        self.encoder = nn.Sequential(
             nn.Conv2d(in_features=1,out_features=16,kernel_size=(3,3), stride=2, padding=1), # out=16x14X14
             nn.ReLU(),
             nn.Conv2d(in_features=16,out_features=32,kernel_size=(3,3) ,stride=2,padding=1), #  out 32x7x7
             nn.ReLU(),
            # nn.Conv2d(input_channel=32,out_channels=64,kernel_size=(7,7) ,stride=2,padding=1) #   64x1x1, ignore this layer
             nn.Flatten(),
             nn.Linear(in_features=32*7*7, out_features=latent_dims)
        )

    def forward(self, x):
        x = self.encoder(x)  
        return x

class Decoder(nn.Module):

    def __init__(self):
        super().__init__()
        
        self.decoder= nn.Sequential(
            nn.Linear(in_features=latent_dims, out_features=32*7*7),
            nn.Unflatten(dim=1, unflatten_size=(32, 7, 7)), #Unflatten for transpose conv
            nn.ConvTranspose2d(in_features=32, out_features=16, kernel_size=3, stride=2, padding=1),
            nn.Linear(),
            nn.ConvTranspose2d(in_features=16, out_features=1, kernel_size=3, stride=2, padding=1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.decoder(x)  
        return x

class Autoencoder(nn.Module):

    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()

    def forward(self, x):
        latent = self.encoder(x)
        recon = self.decoder(latent)
        return recon
    
autoencoder = Autoencoder()
Error:
TypeError                                 Traceback (most recent call last)
Input In [19], in <cell line: 50>()
     47         recon = self.decoder(latent)
     48         return recon
---> 50 autoencoder = Autoencoder()
     52 device = torch.device("cuda:0" if use_gpu and torch.cuda.is_available() else "cpu")
     53 autoencoder = autoencoder.to(device)

Input In [19], in Autoencoder.__init__(self)
     40 def __init__(self):
     41     super(Autoencoder, self).__init__()
---> 42     self.encoder = Encoder()
     43     self.decoder = Decoder()

TypeError: __init__() missing 2 required positional arguments: 'in_features' and 'out_features'

I tried many things, but it still doesn't work; I think there is a logic problem.

I tried passing just the numbers (positional arguments) to the conv layers, but then I got a (seemingly) nonsensical error in Unflatten. I also tried removing some layers to see whether they were the problem, but nothing helped.

  • Please reformat your question to be legible. Running the code you have provided produces a different error from the one you have posted – you have made mistakes in translating your code to a question

Conv2d layers have no in_features or out_features arguments. Instead, the layer has in_channels and out_channels arguments. Additionally, the second Linear layer in the Decoder must have in_features and out_features provided according to your input size.

import torch
from torch import nn

latent_dims=4

class Encoder(nn.Module):

    def __init__(self):
        super().__init__()

        #input Nx1x28x28
        self.encoder = nn.Sequential(
             nn.Conv2d(in_channels=1, out_channels=16,kernel_size=(3,3), stride=2, padding=1), # out=16x14X14
             nn.ReLU(),
             nn.Conv2d(in_channels=16,out_channels=32,kernel_size=(3,3) ,stride=2,padding=1), #  out 32x7x7
             nn.ReLU(),
            # nn.Conv2d(input_channel=32,out_channels=64,kernel_size=(7,7) ,stride=2,padding=1) #   64x1x1, ignore this layer
             nn.Flatten(),
             nn.Linear(in_features=32*7*7, out_features=latent_dims)
        )

    def forward(self, x):
        x = self.encoder(x)  
        return x

class Decoder(nn.Module):

    def __init__(self):
        super().__init__()

        self.decoder= nn.Sequential(
            nn.Linear(in_features=latent_dims, out_features=32*7*7),
            nn.Unflatten(dim=1, unflattened_size=(32, 7, 7)), #Unflatten for transpose conv
            nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3, stride=2, padding=1),
            nn.Linear(in_features=32*7*7, out_features=32*7*7),  # placeholder sizes; set in_features/out_features to match the tensor shape at this point
            nn.ConvTranspose2d(in_channels=16, out_channels=1, kernel_size=3, stride=2, padding=1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.decoder(x)  
        return x

class Autoencoder(nn.Module):

    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()

    def forward(self, x):
        latent = self.encoder(x)
        recon = self.decoder(latent)
        return recon
    
autoencoder = Autoencoder()
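
If the goal is to reconstruct the original Nx1x28x28 images, one option is to drop the intermediate Linear layer entirely and let the transposed convolutions upsample back to the input size. Below is a minimal sketch of such a decoder, assuming MNIST-sized inputs; the output_padding=1 arguments are an assumption chosen so that 7x7 maps back to 14x14 and then to 28x28.

import torch
from torch import nn

latent_dims = 4

# Sketch of a decoder that mirrors the encoder and upsamples back to Nx1x28x28.
decoder = nn.Sequential(
    nn.Linear(in_features=latent_dims, out_features=32*7*7),
    nn.Unflatten(dim=1, unflattened_size=(32, 7, 7)),                                  # Nx32x7x7
    nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1),  # Nx16x14x14
    nn.ReLU(),
    nn.ConvTranspose2d(16, 1, kernel_size=3, stride=2, padding=1, output_padding=1),   # Nx1x28x28
    nn.Sigmoid()
)

# Quick shape check with a dummy batch of latent vectors.
z = torch.randn(8, latent_dims)
print(decoder(z).shape)  # torch.Size([8, 1, 28, 28])

The shape comments follow the ConvTranspose2d output formula out = (in - 1)*stride - 2*padding + kernel_size + output_padding (with dilation 1).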
