
Learning Style Transfer with CycleGAN: Implementing Custom Filters

Model Definitions

import time

import matplotlib.pyplot as plt
import tensorflow as tf

# downsample(), upsample() and InstanceNormalization below are the helper
# blocks defined earlier in the article (the same ones used in TensorFlow's
# pix2pix example).

def unet_generator(output_channels, norm_type='batchnorm'):
    """Modified U-Net generator model (https://arxiv.org/abs/1611.07004).

    Args:
        output_channels: Output channels.
        norm_type: Type of normalization. Either 'batchnorm' or 'instancenorm'.

    Returns:
        Generator model
    """
    down_stack = [
        downsample(64, 4, norm_type, apply_norm=False),   # (bs, 128, 128, 64)
        downsample(128, 4, norm_type),                    # (bs, 64, 64, 128)
        downsample(256, 4, norm_type),                    # (bs, 32, 32, 256)
        downsample(512, 4, norm_type),                    # (bs, 16, 16, 512)
        downsample(512, 4, norm_type),                    # (bs, 8, 8, 512)
        downsample(512, 4, norm_type),                    # (bs, 4, 4, 512)
        downsample(512, 4, norm_type),                    # (bs, 2, 2, 512)
        downsample(512, 4, norm_type),                    # (bs, 1, 1, 512)
    ]

    up_stack = [
        upsample(512, 4, norm_type, apply_dropout=True),  # (bs, 2, 2, 1024)
        upsample(512, 4, norm_type, apply_dropout=True),  # (bs, 4, 4, 1024)
        upsample(512, 4, norm_type, apply_dropout=True),  # (bs, 8, 8, 1024)
        upsample(512, 4, norm_type),                      # (bs, 16, 16, 1024)
        upsample(256, 4, norm_type),                      # (bs, 32, 32, 512)
        upsample(128, 4, norm_type),                      # (bs, 64, 64, 256)
        upsample(64, 4, norm_type),                       # (bs, 128, 128, 128)
    ]

    initializer = tf.random_normal_initializer(0., 0.02)
    last = tf.keras.layers.Conv2DTranspose(
        output_channels, 4, strides=2,
        padding='same', kernel_initializer=initializer,
        activation='tanh')  # (bs, 256, 256, 3)

    concat = tf.keras.layers.Concatenate()

    inputs = tf.keras.layers.Input(shape=[None, None, 3])
    x = inputs

    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = concat([x, skip])

    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
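As a quick sanity check (a minimal sketch, not part of the original article), you can confirm the generator maps a 256×256 image back to the same resolution; this assumes the downsample/upsample helpers above are already defined:

gen = unet_generator(3, norm_type='instancenorm')
fake = gen(tf.zeros([1, 256, 256, 3]))  # dummy batch of one 256x256 RGB image
print(fake.shape)  # expected: (1, 256, 256, 3), values in [-1, 1] due to tanh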

def discriminator(norm_type='batchnorm', target=True):
    """PatchGAN discriminator model (https://arxiv.org/abs/1611.07004).

    Args:
        norm_type: Type of normalization. Either 'batchnorm' or 'instancenorm'.
        target: Bool, indicating whether target image is an input or not.

    Returns:
        Discriminator model
    """
    initializer = tf.random_normal_initializer(0., 0.02)

    inp = tf.keras.layers.Input(shape=[None, None, 3], name='input_image')
    x = inp

    if target:
        tar = tf.keras.layers.Input(shape=[None, None, 3], name='target_image')
        x = tf.keras.layers.concatenate([inp, tar])  # (bs, 256, 256, channels*2)

    down1 = downsample(64, 4, norm_type, False)(x)   # (bs, 128, 128, 64)
    down2 = downsample(128, 4, norm_type)(down1)     # (bs, 64, 64, 128)
    down3 = downsample(256, 4, norm_type)(down2)     # (bs, 32, 32, 256)

    zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3)  # (bs, 34, 34, 256)
    conv = tf.keras.layers.Conv2D(
        512, 4, strides=1, kernel_initializer=initializer,
        use_bias=False)(zero_pad1)  # (bs, 31, 31, 512)

    if norm_type.lower() == 'batchnorm':
        norm1 = tf.keras.layers.BatchNormalization()(conv)
    elif norm_type.lower() == 'instancenorm':
        norm1 = InstanceNormalization()(conv)

    leaky_relu = tf.keras.layers.LeakyReLU()(norm1)

    zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu)  # (bs, 33, 33, 512)

    last = tf.keras.layers.Conv2D(
        1, 4, strides=1,
        kernel_initializer=initializer)(zero_pad2)  # (bs, 30, 30, 1)

    if target:
        return tf.keras.Model(inputs=[inp, tar], outputs=last)
    else:
        return tf.keras.Model(inputs=inp, outputs=last)
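This is a PatchGAN discriminator: instead of one real/fake score per image, it outputs a 30×30 grid of logits, each judging a patch of the input. A quick shape check (a sketch, not from the original article):

disc = discriminator(norm_type='instancenorm', target=False)
patch_logits = disc(tf.zeros([1, 256, 256, 3]))  # dummy 256x256 RGB batch
print(patch_logits.shape)  # expected: (1, 30, 30, 1)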

Loss Functions

LAMBDA = 10
loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real, generated):
    real_loss = loss_obj(tf.ones_like(real), real)
    generated_loss = loss_obj(tf.zeros_like(generated), generated)
    total_disc_loss = real_loss + generated_loss
    return total_disc_loss * 0.5

def generator_loss(generated):
    return loss_obj(tf.ones_like(generated), generated)

def calc_cycle_loss(real_image, cycled_image):
    loss1 = tf.reduce_mean(tf.abs(real_image - cycled_image))
    return LAMBDA * loss1

def identity_loss(real_image, same_image):
    loss = tf.reduce_mean(tf.abs(real_image - same_image))
    return LAMBDA * 0.5 * loss
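For reference, calc_cycle_loss implements one direction of the cycle-consistency objective from the CycleGAN paper, scaled by LAMBDA = 10; the training step below sums both directions:

$$\mathcal{L}_{\text{cyc}}(G, F) = \mathbb{E}_{x}\left[\lVert F(G(x)) - x \rVert_{1}\right] + \mathbb{E}_{y}\left[\lVert G(F(y)) - y \rVert_{1}\right]$$

identity_loss applies the same L1 distance at half the weight (LAMBDA * 0.5), encouraging each generator to leave images from its own output domain unchanged, which helps preserve color composition.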

Visualizing Training Results

def generate_images(model, test_input):
    prediction = model(test_input)

    plt.figure(figsize=(12, 12))
    display_list = [test_input[0], prediction[0]]
    title = ['Input Image', 'Predicted Image']

    for i in range(2):
        plt.subplot(1, 2, i + 1)
        plt.title(title[i])
        # Rescale the pixel values from [-1, 1] to [0, 1] to plot them.
        plt.imshow(display_list[i] * 0.5 + 0.5)
        plt.axis('off')

    # Save before show(): show() clears the current figure, so calling
    # savefig() afterwards would write out a blank image.
    # Note: assumes a results/ directory already exists.
    plt.savefig('results/{}.png'.format(time.time()))
    plt.show()
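During training you would call this on a fixed test batch so progress is comparable across epochs. A hedged sketch (test_vangogh is an assumed tf.data.Dataset of test images; generator_g is instantiated in the next section):

sample_vangogh = next(iter(test_vangogh))  # fixed preview batch from an assumed test pipeline
generate_images(generator_g, sample_vangogh)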

Training Procedure

First, instantiate the models:

generator_g = unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')
generator_f = unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')

discriminator_x = discriminator(norm_type='instancenorm', target=False)
discriminator_y = discriminator(norm_type='instancenorm', target=False)
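Note target=False: unlike pix2pix, CycleGAN trains on unpaired images, so each discriminator sees a single image rather than an (input, target) pair. OUTPUT_CHANNELS is assumed to be defined earlier in the article; for RGB output it would be:

OUTPUT_CHANNELS = 3  # assumption: RGB images, as in the TensorFlow CycleGAN tutorial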

Next, instantiate the optimizers:

generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

checkpoint_path = "./checkpoints/vangogh2photo_train"
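To be able to resume training from checkpoint_path, the models and optimizers can be wired into a tf.train.Checkpoint managed by a tf.train.CheckpointManager. A minimal sketch following the pattern used in the TensorFlow CycleGAN tutorial:

ckpt = tf.train.Checkpoint(
    generator_g=generator_g, generator_f=generator_f,
    discriminator_x=discriminator_x, discriminator_y=discriminator_y,
    generator_g_optimizer=generator_g_optimizer,
    generator_f_optimizer=generator_f_optimizer,
    discriminator_x_optimizer=discriminator_x_optimizer,
    discriminator_y_optimizer=discriminator_y_optimizer)

ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)

# Restore the latest checkpoint if one exists.
if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)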

Then define a custom training step with tf.GradientTape:

@tf.function
def train_step(real_x, real_y):
    # persistent is set to True because the tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        # Generator G translates X -> Y.
        # Generator F translates Y -> X.
        fake_y = generator_g(real_x, training=True)
        cycled_x = generator_f(fake_y, training=True)

        fake_x = generator_f(real_y, training=True)
        cycled_y = generator_g(fake_x, training=True)

        # same_x and same_y are used for identity loss.
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)

        disc_real_x = discriminator_x(real_x, training=True)
        disc_real_y = discriminator_y(real_y, training=True)

        disc_fake_x = discriminator_x(fake_x, training=True)
        disc_fake_y = discriminator_y(fake_y, training=True)

        # Calculate the losses.
        gen_g_loss = generator_loss(disc_fake_y)
        gen_f_loss = generator_loss(disc_fake_x)

        total_cycle_loss = calc_cycle_loss(real_x, cycled_x) + calc_cycle_loss(real_y, cycled_y)

        # Total generator loss = adversarial loss + cycle loss + identity loss.
        total_gen_g_loss = gen_g_loss + total_cycle_loss + identity_loss(real_y, same_y)
        total_gen_f_loss = gen_f_loss + total_cycle_loss + identity_loss(real_x, same_x)

        disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
        disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)

    # Calculate the gradients for the generators and discriminators.
    generator_g_gradients = tape.gradient(total_gen_g_loss,
                                          generator_g.trainable_variables)
    generator_f_gradients = tape.gradient(total_gen_f_loss,
                                          generator_f.trainable_variables)

    discriminator_x_gradients = tape.gradient(disc_x_loss,
                                              discriminator_x.trainable_variables)
    discriminator_y_gradients = tape.gradient(disc_y_loss,
                                              discriminator_y.trainable_variables)

    # Apply the gradients to the optimizers.
    generator_g_optimizer.apply_gradients(zip(generator_g_gradients,
                                              generator_g.trainable_variables))
    generator_f_optimizer.apply_gradients(zip(generator_f_gradients,
                                              generator_f.trainable_variables))
    discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,
                                                  discriminator_x.trainable_variables))
    discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,
                                                  discriminator_y.trainable_variables))
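With train_step in place, a minimal outer training loop might look like the sketch below. EPOCHS, train_vangogh, and train_photo are assumptions standing in for your hyperparameters and unpaired tf.data.Dataset pipelines; sample_vangogh is the fixed preview batch from the visualization section, and ckpt_manager comes from the checkpoint sketch above:

EPOCHS = 40  # assumption; tune to your dataset

for epoch in range(EPOCHS):
    # train_vangogh / train_photo are assumed tf.data.Dataset pipelines,
    # each yielding batches from one of the two unpaired image domains.
    for real_x, real_y in tf.data.Dataset.zip((train_vangogh, train_photo)):
        train_step(real_x, real_y)

    # Visualize progress on the fixed sample and checkpoint periodically.
    generate_images(generator_g, sample_vangogh)
    if (epoch + 1) % 5 == 0:
        ckpt_manager.save()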
