Generative Adversarial Networks (GAN): MNIST and Time-Series Implementations
Introduction to Generative Adversarial Networks
A Generative Adversarial Network (GAN) is a deep learning model proposed by Ian Goodfellow et al. in 2014. It consists of two main components: a generator and a discriminator. The goal of a GAN is to produce realistic data through adversarial learning between the two networks.
- Generator: the generator is a neural network that takes a random noise vector as input and tries to transform it into a realistic data sample. During training it keeps improving the quality of its samples so that they can fool the discriminator. Early samples may look unconvincing, but as training progresses the generator gradually learns to produce more realistic data.
- Discriminator: the discriminator is also a neural network; its task is to distinguish real data samples from the fake samples produced by the generator. It acts like a binary classifier, sorting inputs into the two classes "real" and "fake". As training proceeds, the discriminator keeps learning to separate real from generated samples, and its accuracy improves accordingly.
Training a GAN is an adversarial process:
- The generator transforms random noise into generated samples and passes them to the discriminator.
- The discriminator classifies the real and generated samples it receives and outputs a probability score for each.
- Based on the discriminator's output, the generator tries to produce more realistic samples that can fool it.
- This process repeats until the generated samples are realistic enough that the discriminator can no longer reliably tell real from fake.
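Formally, this game corresponds to the minimax objective from the original 2014 paper, where $D(x)$ is the discriminator's estimated probability that $x$ is real and $G(z)$ is the generator's output for noise $z$:

$$\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\text{data}}}[\log D(x)] + \mathbb{E}_{z \sim p_z}[\log(1 - D(G(z)))]$$

The discriminator maximizes $V$ by scoring real samples near 1 and generated samples near 0; the generator minimizes it by pushing $D(G(z))$ toward 1.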
Through this adversarial learning process, GANs can generate high-quality data samples and are widely applied to images, audio, text, and more. Training GANs also comes with challenges, such as instability and mode collapse, which call for careful tuning and refinement.
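One simple trick often used against the instability mentioned above is one-sided label smoothing: the discriminator is trained against a softened "real" target such as 0.9 instead of 1.0. This is not part of the implementations below, just a minimal self-contained illustration of the idea:

import torch
import torch.nn as nn

criterion = nn.BCELoss()
batch_size = 64
# Stand-in for discriminator outputs on a batch of real samples
preds = torch.sigmoid(torch.randn(batch_size, 1))
# One-sided label smoothing: target 0.9 for real samples instead of 1.0
smoothed_real = torch.full((batch_size, 1), 0.9)
loss = criterion(preds, smoothed_real)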
MNIST GAN
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# Define the generator and discriminator classes
class Generator(nn.Module):
    def __init__(self, z_dim=100, hidden_dim=128, output_dim=784):
        super(Generator, self).__init__()
        self.gen = nn.Sequential(
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.01),
            nn.Linear(hidden_dim, hidden_dim * 2),
            nn.LeakyReLU(0.01),
            nn.Linear(hidden_dim * 2, output_dim),
            nn.Tanh()  # outputs in [-1, 1], matching the normalized MNIST pixels
        )

    def forward(self, noise):
        return self.gen(noise)

class Discriminator(nn.Module):
    def __init__(self, input_dim=784, hidden_dim=128):
        super(Discriminator, self).__init__()
        self.disc = nn.Sequential(
            nn.Linear(input_dim, hidden_dim * 2),
            nn.LeakyReLU(0.01),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LeakyReLU(0.01),
            nn.Linear(hidden_dim, 1),
            nn.Sigmoid()  # probability that the input image is real
        )

    def forward(self, image):
        return self.disc(image)
# Training loop for the GAN
def train_gan(generator, discriminator, dataloader, num_epochs=50, z_dim=100, lr=0.0002):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    generator.to(device)
    discriminator.to(device)
    gen_optim = optim.Adam(generator.parameters(), lr=lr)
    disc_optim = optim.Adam(discriminator.parameters(), lr=lr)
    criterion = nn.BCELoss()
    for epoch in range(num_epochs):
        for batch_idx, (real_images, _) in enumerate(dataloader):
            batch_size = real_images.size(0)
            real_images = real_images.view(batch_size, -1).to(device)  # flatten 28x28 to 784
            ones_labels = torch.ones(batch_size, 1).to(device)
            zeros_labels = torch.zeros(batch_size, 1).to(device)
            # Train the discriminator on real and generated images
            disc_optim.zero_grad()
            real_preds = discriminator(real_images)
            real_loss = criterion(real_preds, ones_labels)
            noise = torch.randn(batch_size, z_dim).to(device)
            fake_images = generator(noise)
            fake_preds = discriminator(fake_images.detach())  # detach: do not update the generator here
            fake_loss = criterion(fake_preds, zeros_labels)
            disc_loss = (real_loss + fake_loss) / 2
            disc_loss.backward()
            disc_optim.step()
            # Train the generator to fool the discriminator
            gen_optim.zero_grad()
            noise = torch.randn(batch_size, z_dim).to(device)
            fake_images = generator(noise)
            preds = discriminator(fake_images)
            gen_loss = criterion(preds, ones_labels)
            gen_loss.backward()
            gen_optim.step()
        print(f"Epoch [{epoch+1}/{num_epochs}], Generator Loss: {gen_loss.item():.4f}, Discriminator Loss: {disc_loss.item():.4f}")
# Main script
if __name__ == "__main__":
    # Hyperparameters and data loading
    z_dim = 100
    batch_size = 64
    num_epochs = 50
    lr = 0.0002
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))  # scale pixels to [-1, 1] to match the Tanh output
    ])
    dataset = MNIST(root="data", transform=transform, download=True)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    # Create the generator and discriminator
    generator = Generator(z_dim=z_dim)
    discriminator = Discriminator()
    # Train the GAN
    train_gan(generator, discriminator, dataloader, num_epochs=num_epochs, z_dim=z_dim, lr=lr)
    # Generate and display some sample images; the noise must live on the same
    # device the generator was moved to inside train_gan
    num_samples = 16
    device = next(generator.parameters()).device
    noise = torch.randn(num_samples, z_dim).to(device)
    generated_images = generator(noise).detach().cpu()
    plt.figure(figsize=(8, 8))
    for i in range(num_samples):
        plt.subplot(4, 4, i + 1)
        plt.imshow(generated_images[i].view(28, 28), cmap='gray')
        plt.axis('off')
    plt.show()
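    # (Optional) persist the trained generator so samples can be drawn later without
    # retraining; the file name "mnist_generator.pt" is just an illustrative choice.
    torch.save(generator.state_dict(), "mnist_generator.pt")
    # To reuse it in another script:
    #   generator = Generator(z_dim=100)
    #   generator.load_state_dict(torch.load("mnist_generator.pt"))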
Conditional GAN (CGAN) for Time-Series Prediction
GANs are usually used to generate static data such as images or text. Applying a GAN to time-series prediction, however, requires some modifications. This section introduces one GAN-based approach to time-series prediction: the Conditional GAN (CGAN).
A Conditional GAN (CGAN) extends the GAN by adding conditioning information to the inputs of both the generator and the discriminator, so that the generator produces time-series data consistent with the given condition. In the prediction task below, a window of historical values serves as the condition for predicting the next value of the series.
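The simplest way to inject the condition, and the pattern used in the implementation below, is to concatenate it with the noise vector before the generator's first layer. A minimal, self-contained sketch (the shapes are chosen to match the script below):

import torch

batch, noise_dim, seq_length = 64, 100, 10
z = torch.randn(batch, noise_dim)           # random noise
condition = torch.randn(batch, seq_length)  # stands in for a flattened history window
g_input = torch.cat((z, condition), dim=1)  # shape: (batch, noise_dim + seq_length)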
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
# Generate synthetic time-series data: the sum of two random sine waves plus noise
def generate_time_series(num_samples, num_timesteps):
    freq1, freq2, offsets1, offsets2 = np.random.rand(4, num_samples, 1)
    time = np.linspace(0, 1, num_timesteps)
    series = 0.5 * np.sin((time - offsets1) * (freq1 * 10 + 10))
    series += 0.2 * np.sin((time - offsets2) * (freq2 * 20 + 20))
    series += 0.1 * (np.random.rand(num_samples, num_timesteps) - 0.5)
    return series[..., np.newaxis].astype(np.float32)

# Slice each series into (history window, next value) pairs; with the defaults in the
# main script this yields X of shape (40000, 10, 1) and y of shape (40000, 1)
def prepare_data(data, seq_length):
    X, y = [], []
    for i in range(data.shape[0]):
        for j in range(data.shape[1] - seq_length):
            X.append(data[i, j:j+seq_length])
            y.append(data[i, j+seq_length])
    X = np.array(X)
    y = np.array(y)
    return X, y
class Generator(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim=64):
        super(Generator, self).__init__()
        self.gen = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim * 2),
            nn.ReLU(),
            nn.Linear(hidden_dim * 2, output_dim),
            nn.Tanh()  # Tanh keeps outputs roughly in the range of the synthetic series
        )

    def forward(self, noise, condition):
        # Conditional input: concatenate the noise with the flattened history window
        return self.gen(torch.cat((noise, condition), dim=1))

class Discriminator(nn.Module):
    def __init__(self, input_dim, hidden_dim=64):  # input_dim must equal seq_length + 1
        super(Discriminator, self).__init__()
        self.disc = nn.Sequential(
            nn.Linear(input_dim, hidden_dim * 2),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, 1),
            nn.Sigmoid(),
        )

    def forward(self, data):
        return self.disc(data)
def train_cgan(generator, discriminator, X, y, num_epochs=1000, batch_size=64, noise_dim=20, lr=0.001):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    generator.to(device)
    discriminator.to(device)
    g_optim = optim.Adam(generator.parameters(), lr=lr)
    d_optim = optim.Adam(discriminator.parameters(), lr=lr)
    loss_fn = nn.BCELoss()
    for epoch in range(num_epochs):
        for i in range(0, X.shape[0], batch_size):
            # Prepare batch data; the last batch may be smaller than batch_size
            X_batch = X[i:i+batch_size]
            X_batch = X_batch.reshape(X_batch.shape[0], -1).to(device)
            y_batch = y[i:i+batch_size].to(device)
            noise = torch.randn(X_batch.shape[0], noise_dim).to(device)
            # Generate a fake "next value" conditioned on the history window
            fake_y = generator(noise, X_batch)
            fake_data = torch.cat((X_batch, fake_y), dim=1)
            # Train the discriminator on real (history + true next value) and fake pairs
            d_optim.zero_grad()
            real_data = torch.cat((X_batch, y_batch), dim=1)
            real_decision = discriminator(real_data)
            real_loss = loss_fn(real_decision, torch.ones_like(real_decision))
            fake_decision = discriminator(fake_data.detach())  # detach: do not update the generator here
            fake_loss = loss_fn(fake_decision, torch.zeros_like(fake_decision))
            d_loss = (real_loss + fake_loss) / 2
            d_loss.backward()
            d_optim.step()
            # Train the generator to fool the discriminator
            g_optim.zero_grad()
            fake_decision = discriminator(fake_data)
            g_loss = loss_fn(fake_decision, torch.ones_like(fake_decision))
            g_loss.backward()
            g_optim.step()
        if epoch % 100 == 0:
            print(f"Epoch {epoch}: D Loss: {d_loss.item():.4f}, G Loss: {g_loss.item():.4f}")
if __name__ == "__main__":
    num_samples = 1000
    num_timesteps = 50
    seq_length = 10
    noise_dim = 100
    lr = 0.001
    num_epochs = 200
    data = generate_time_series(num_samples, num_timesteps)
    X, y = prepare_data(data, seq_length)
    X = torch.tensor(X, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32)
    generator_input_dim = noise_dim + seq_length  # noise plus the flattened history window
    generator_output_dim = 1  # the generator predicts the next value of the series
    discriminator_input_dim = seq_length + 1  # history window plus the (real or generated) next value
    generator = Generator(generator_input_dim, generator_output_dim)
    discriminator = Discriminator(discriminator_input_dim)
    train_cgan(generator, discriminator, X, y, num_epochs=num_epochs, batch_size=64, noise_dim=noise_dim, lr=lr)
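    # The script above never visualizes what the trained generator learned. A minimal
    # sketch (an addition, not part of the original script): condition on the first
    # 100 history windows and compare the generated next values with the true ones.
    device = next(generator.parameters()).device
    generator.eval()
    with torch.no_grad():
        cond = X[:100].reshape(100, -1).to(device)
        noise = torch.randn(100, noise_dim).to(device)
        preds = generator(noise, cond).cpu().squeeze(1)
    plt.figure(figsize=(8, 4))
    plt.plot(y[:100].squeeze(1).numpy(), label="true next value")
    plt.plot(preds.numpy(), label="CGAN prediction")
    plt.legend()
    plt.show()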