The following code defines a generative model using a simple GAN architecture. The implementation contains several issues that can lead to incorrect training behavior or performance inefficiencies.
Your task is to identify at least three critical issues in the code and suggest appropriate fixes.
===================================================================================
import torch
import torch.nn as nn
import torch.optim as optim


class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 784),
            nn.Tanh()
        )

    def forward(self, z):
        return self.model(z).view(-1, 1, 28, 28)


class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(784, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.model(x.view(-1, 784))


generator = Generator()
discriminator = Discriminator()

# Loss and Optimizers
criterion = nn.BCELoss()
optimizer_G = optim.Adam(generator.parameters(), lr=0.0002)
optimizer_D = optim.Adam(discriminator.parameters(), lr=0.0002)

# Training Loop
for epoch in range(10):
    for _ in range(100):
        z = torch.randn(64, 100)
        fake_images = generator(z)
        real_labels = torch.ones(64, 1)
        fake_labels = torch.zeros(64, 1)

        # Train Discriminator
        optimizer_D.zero_grad()
        real_loss = criterion(discriminator(fake_images), real_labels)
        fake_loss = criterion(discriminator(fake_images), fake_labels)
        d_loss = real_loss + fake_loss
        d_loss.backward()
        optimizer_D.step()

        # Train Generator
        optimizer_G.zero_grad()
        gen_loss = criterion(discriminator(fake_images), real_labels)
        gen_loss.backward()
        optimizer_G.step()
===================================================================================
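For reference, the sketch below shows one possible corrected training loop for the Generator and Discriminator defined above. It is a minimal illustration, not the only valid answer, and it relies on assumptions that are not in the original code: a hypothetical DataLoader named dataloader that yields batches of real 28x28 images normalized to [-1, 1] (to match the generator's Tanh output), and the commonly used Adam betas of (0.5, 0.999) for GAN training.
===================================================================================
# Sketch of a corrected training loop. Assumes `generator` and `discriminator`
# from above, plus a hypothetical `dataloader` of images scaled to [-1, 1].
import torch
import torch.nn as nn
import torch.optim as optim

latent_dim = 100
criterion = nn.BCELoss()
optimizer_G = optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizer_D = optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))

for epoch in range(10):
    for real_images, _ in dataloader:  # real data was missing in the original
        batch_size = real_images.size(0)
        real_labels = torch.ones(batch_size, 1)
        fake_labels = torch.zeros(batch_size, 1)

        # --- Train Discriminator ---
        optimizer_D.zero_grad()
        z = torch.randn(batch_size, latent_dim)
        fake_images = generator(z)
        # The "real" loss uses real images; detach() keeps the discriminator
        # update from backpropagating into the generator.
        real_loss = criterion(discriminator(real_images), real_labels)
        fake_loss = criterion(discriminator(fake_images.detach()), fake_labels)
        d_loss = real_loss + fake_loss
        d_loss.backward()
        optimizer_D.step()

        # --- Train Generator ---
        optimizer_G.zero_grad()
        # fake_images' graph is still intact because the discriminator step
        # only backpropagated through the detached copy.
        gen_loss = criterion(discriminator(fake_images), real_labels)
        gen_loss.backward()
        optimizer_G.step()
===================================================================================
A further refinement worth mentioning in an answer is replacing the final Sigmoid plus nn.BCELoss with nn.BCEWithLogitsLoss, which is more numerically stable.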