Building a DCGAN in TensorFlow 2.0 to Generate Cartoon Avatars
This post walks through building a DCGAN with TensorFlow 2.0 and training it to generate cartoon (anime-style) avatars. The complete training script follows: hyperparameters and data loading, the generator and discriminator models, the loss functions, and the training loop.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from glob import glob
from data import make_anime_dataset  # local helper module (see the sketch after the script)
import numpy as np
import os
from PIL import Image  # replaces scipy.misc.toimage, which was removed in SciPy >= 1.2

z_dim = 100            # length of the latent vector z
epochs = 30            # number of training iterations
batch_size = 64
learning_rate = 0.0002
is_training = True

img_paths = glob('F:\\GAN\\DCGAN\\faces\\*.jpg')
dataset, img_shape, _ = make_anime_dataset(img_paths, batch_size=batch_size, resize=64)
dataset = dataset.repeat(100)
db_iter = iter(dataset)


class Generator(keras.Model):
    def __init__(self):
        super(Generator, self).__init__()
        filter = 64
        # Transposed convolutions upsample the 1x1 latent map to a 64x64x3 image.
        self.conv1 = layers.Conv2DTranspose(filters=filter*8, kernel_size=4, strides=1, padding='valid', use_bias=False)
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2DTranspose(filters=filter*4, kernel_size=4, strides=2, padding='same', use_bias=False)
        self.bn2 = layers.BatchNormalization()
        self.conv3 = layers.Conv2DTranspose(filters=filter*2, kernel_size=4, strides=2, padding='same', use_bias=False)
        self.bn3 = layers.BatchNormalization()
        self.conv4 = layers.Conv2DTranspose(filters=filter*1, kernel_size=4, strides=2, padding='same', use_bias=False)
        self.bn4 = layers.BatchNormalization()
        self.conv5 = layers.Conv2DTranspose(filters=3, kernel_size=4, strides=2, padding='same', use_bias=False)

    def call(self, inputs, training=None):
        # Reshape the latent vector [b, z_dim] into a [b, 1, 1, z_dim] feature map.
        x = tf.reshape(inputs, (inputs.shape[0], 1, 1, inputs.shape[1]))
        x = tf.nn.relu(x)
        x = tf.nn.relu(self.bn1(self.conv1(x), training=training))
        x = tf.nn.relu(self.bn2(self.conv2(x), training=training))
        x = tf.nn.relu(self.bn3(self.conv3(x), training=training))
        x = tf.nn.relu(self.bn4(self.conv4(x), training=training))
        x = self.conv5(x)
        x = tf.tanh(x)  # output pixels in [-1, 1]
        return x


class Discriminator(keras.Model):
    def __init__(self):
        super(Discriminator, self).__init__()
        filter = 64
        # Strided convolutions downsample the 64x64x3 image to a single real/fake logit.
        self.conv1 = layers.Conv2D(filters=filter, kernel_size=4, strides=2, padding='valid', use_bias=False)
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2D(filters=filter*2, kernel_size=4, strides=2, padding='valid', use_bias=False)
        self.bn2 = layers.BatchNormalization()
        self.conv3 = layers.Conv2D(filters=filter*4, kernel_size=4, strides=2, padding='valid', use_bias=False)
        self.bn3 = layers.BatchNormalization()
        self.conv4 = layers.Conv2D(filters=filter*8, kernel_size=3, strides=1, padding='valid', use_bias=False)
        self.bn4 = layers.BatchNormalization()
        self.conv5 = layers.Conv2D(filters=filter*16, kernel_size=3, strides=1, padding='valid', use_bias=False)
        self.bn5 = layers.BatchNormalization()
        self.pool = layers.GlobalAveragePooling2D()
        self.flatten = layers.Flatten()
        self.fc = layers.Dense(1)

    def call(self, inputs, training=None):
        x = tf.nn.leaky_relu(self.bn1(self.conv1(inputs), training=training))
        x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training))
        x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=training))
        x = tf.nn.leaky_relu(self.bn4(self.conv4(x), training=training))
        x = tf.nn.leaky_relu(self.bn5(self.conv5(x), training=training))
        x = self.pool(x)
        x = self.flatten(x)
        logits = self.fc(x)
        return logits


def celoss_ones(logits):
    # Cross-entropy against an all-ones target (label "real").
    y = tf.ones_like(logits)
    loss = keras.losses.binary_crossentropy(y, logits, from_logits=True)
    return tf.reduce_mean(loss)


def celoss_zeros(logits):
    # Cross-entropy against an all-zeros target (label "fake").
    y = tf.zeros_like(logits)
    loss = keras.losses.binary_crossentropy(y, logits, from_logits=True)
    return tf.reduce_mean(loss)


def d_loss_fn(generator, discriminator, batch_z, batch_x, is_training):
    # The discriminator should classify real images as 1 and generated images as 0.
    fake_image = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    d_real_logits = discriminator(batch_x, is_training)
    d_loss_real = celoss_ones(d_real_logits)
    d_loss_fake = celoss_zeros(d_fake_logits)
    loss = d_loss_fake + d_loss_real
    return loss
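Before wiring up the training loop, it can help to confirm that the two networks produce the expected tensor shapes. This quick smoke test is not part of the original post; it is a minimal sketch that assumes only the Generator and Discriminator classes and the z_dim constant defined above.

# Illustrative shape check, not in the original script.
g = Generator()
d = Discriminator()
z = tf.random.normal([4, z_dim])     # dummy batch of 4 latent vectors
fake = g(z, training=False)          # expected shape: (4, 64, 64, 3)
logits = d(fake, training=False)     # expected shape: (4, 1)
print(fake.shape, logits.shape)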
def g_loss_fn(generator, discriminator, batch_z, is_training):
    # The generator wants its fakes to be classified as real (label 1).
    fake_image = generator(batch_z, is_training)
    d_fake_logits = discriminator(fake_image, is_training)
    loss = celoss_ones(d_fake_logits)
    return loss


generator = Generator()
# generator.build(input_shape=(4, z_dim))
discriminator = Discriminator()
# discriminator.build(input_shape=(4, 64, 64, 3))
g_optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
d_optimizer = keras.optimizers.Adam(learning_rate=learning_rate)


def save_result(val_out, val_block_size, image_path, color_mode):
    # color_mode is unused; kept for compatibility with the original signature.
    def preprocess(img):
        # Map [-1, 1] generator outputs back to [0, 255] uint8 pixels.
        img = ((img + 1.0) * 127.5).astype(np.uint8)
        return img

    preprocessed = preprocess(val_out)
    final_image = np.array([])
    single_row = np.array([])
    for b in range(val_out.shape[0]):
        # concatenate images into a row
        if single_row.size == 0:
            single_row = preprocessed[b, :, :, :]
        else:
            single_row = np.concatenate((single_row, preprocessed[b, :, :, :]), axis=1)
        # append the finished row to the final image grid
        if (b + 1) % val_block_size == 0:
            if final_image.size == 0:
                final_image = single_row
            else:
                final_image = np.concatenate((final_image, single_row), axis=0)
            # reset the row buffer
            single_row = np.array([])

    if final_image.shape[2] == 1:
        final_image = np.squeeze(final_image, axis=2)
    # scipy.misc.toimage has been removed from SciPy; PIL does the same job here.
    Image.fromarray(final_image).save(image_path)


for epoch in range(epochs):
    # Train the discriminator for 5 steps per generator step.
    for _ in range(5):
        batch_z = tf.random.normal([batch_size, z_dim])
        batch_x = next(db_iter)
        with tf.GradientTape() as tape:
            d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x, is_training)
        grads = tape.gradient(d_loss, discriminator.trainable_variables)
        d_optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

    batch_z = tf.random.normal([batch_size, z_dim])
    batch_x = next(db_iter)
    with tf.GradientTape() as tape:
        g_loss = g_loss_fn(generator, discriminator, batch_z, is_training)
    grads = tape.gradient(g_loss, generator.trainable_variables)
    g_optimizer.apply_gradients(zip(grads, generator.trainable_variables))

    if epoch % 100 == 0:
        # Note: with epochs = 30 this fires only at epoch 0; raise epochs (or
        # lower the modulus) to get periodic samples during a real run.
        print(epoch, 'd-loss:', float(d_loss), 'g-loss:', float(g_loss))
        z = tf.random.normal([100, z_dim])
        fake_image = generator(z, is_training=False)
        img_path = 'F:\\GAN\\DCGAN\\images\\gan-%d.png' % epoch
        save_result(fake_image.numpy(), 10, img_path, color_mode='P')
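One dependency deserves a note: make_anime_dataset is imported from a local data module that the post does not include. For readers without it, the stand-in below is a hypothetical reconstruction built on tf.data: it decodes the JPEGs, resizes them to 64x64, and rescales pixels to [-1, 1] so they match the generator's tanh output range. The original helper's exact preprocessing, shuffle settings, and third return value may differ.

def make_anime_dataset(img_paths, batch_size, resize=64):
    # Hypothetical stand-in for the missing data.make_anime_dataset helper.
    def load(path):
        img = tf.io.read_file(path)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.image.resize(img, [resize, resize])
        img = img / 127.5 - 1.0  # scale pixels to [-1, 1] to match tanh
        return img

    ds = tf.data.Dataset.from_tensor_slices(img_paths)
    ds = ds.map(load, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.shuffle(1000).batch(batch_size, drop_remainder=True)
    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
    img_shape = (resize, resize, 3)
    # Third return value is guessed; the training script unpacks but discards it.
    return ds, img_shape, len(img_paths)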
Summary
The above is the complete code for building a DCGAN with TensorFlow 2.0 to generate cartoon avatars. Hopefully it helps you solve the problem you ran into.