Implementing a DCGAN in Keras

import tensorflow.compat.v1 as tf  # use the TF 1.x-style API
tf.disable_v2_behavior()  # disable TF 2.x behavior
from keras.datasets import mnist  # not used below; training data is read from ./dataset instead
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import random
import os
import cv2
image_types = (".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff")

def list_images(basePath, contains=None):
    # return the paths of all valid images under basePath
    return list_files(basePath, validExts=image_types, contains=contains)

def list_files(basePath, validExts=None, contains=None):
    # walk the image directory and yield the path of each image
    for (rootDir, dirNames, filenames) in os.walk(basePath):
        # loop over the filenames in the current directory
        for filename in filenames:
            # if the contains string is not None and the filename does not
            # contain the supplied string, then ignore the file
            if contains is not None and filename.find(contains) == -1:
                continue

            # determine the file extension from the position of the last "."
            ext = filename[filename.rfind("."):].lower()

            # check whether the file is an image that should be processed
            if validExts is None or ext.endswith(validExts):
                # construct the full image path
                imagePath = os.path.join(rootDir, filename)
                yield imagePath
"""
   生成器(Generator),能够输入一个向量,输出需要生成固定大小的像素图像
   判别器(Discriminator),用来判别图片是真的还是假的,输入图片(训练的数据或者生成的数据),输出为判别图片的标签
   首先定义一个模型类
   初始化DCGAN模型结构
   判别器:CNN,build_discriminator
   生成器:CNN,build_generator
"""
class DCGAN():
    def __init__(self):
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100  # size of the latent noise vector
        optimizer = Adam(0.0002, 0.5)  # Adam optimizer: learning rate 0.0002, beta_1 0.5
        # 1. build the generator
        # 2. build the discriminator and set up its training configuration
        self.generator = self.build_generator()
        self.discriminator = self.build_discriminator()
        # choose the loss and optimizer, and track accuracy
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)
        # in the combined model only the generator is trained; the discriminator is frozen
        self.discriminator.trainable = False
        valid = self.discriminator(img)
        # train the generator to fool the discriminator
        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_generator(self):
        # build the model
        model = Sequential()
        model.add(Dense(128 * 7 * 7,activation="relu",input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))
        model.summary()
        # input: a noise vector; output: a generated image
        return model

    def build_discriminator(self):
        model = Sequential()
        model.add(Conv2D(32,kernel_size=3,strides=2,input_shape=self.img_shape,padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        return model

    def train(self, epochs, batch_size, save_interval, log_interval):
        data = []
        # collect the image file paths for later reading
        imagePaths = sorted(list(list_images('./dataset')))
        random.seed(42)
        random.shuffle(imagePaths)
        # loop over the paths and read the data
        for imagePath in imagePaths:
            # read the image as a grayscale array
            image = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
            image = cv2.resize(image, (28, 28))
            data.append(image)
        # the generator's tanh output lies in [-1, 1], so rescale pixels from [0, 255] to [-1, 1]
        # list ----> array
        data = np.array(data, dtype="float") / 127.5 - 1.
        data = np.expand_dims(data, axis=3)

        # targets for real and fake samples: real = 1, fake = 0
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        logs = []  # loss history

        for epoch in range(epochs):
            # train the discriminator
            # pick a random batch of real samples
            idx = np.random.randint(0, data.shape[0], batch_size)
            imgs = data[idx]

            # let the generator produce fake samples
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)

            # train the discriminator: real images are labelled 1,
            # generated (fake) images are labelled 0
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            # average the losses of the two halves
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # train the generator while the discriminator is frozen
            # (the combined model only updates the generator's weights);
            # use target 1 so the generated samples move closer to the real ones
            g_loss = self.combined.train_on_batch(noise, valid)
            # save the losses and generated images periodically
            if epoch % log_interval == 0:
                logs.append([epoch, d_loss[0], d_loss[1], g_loss])
            if epoch % save_interval == 0:
                print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))
                self.save_imgs(epoch)
        self.showlogs(logs)
    def showlogs(self, logs):
        logs = np.array(logs)
        names = ["d_loss", "d_acc", "g_loss"]
        for i in range(3):
            plt.subplot(2, 2, i + 1)
            plt.plot(logs[:, 0], logs[:, i + 1])
            plt.xlabel("epoch")
            plt.ylabel(names[i])
        plt.tight_layout()
        plt.show()
    # periodically save a generated image
    def save_imgs(self, epoch):
        # r, c = 5, 5
        # draw a fresh noise vector; one sample is generated and saved per call
        # (the commented-out block below would instead save a 5x5 grid of 25 images)
        noise = np.random.normal(0, 1, (1, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # rescale the generated pixels from [-1, 1] back to [0, 255]
        gen_imgs = 0.5 * gen_imgs + 0.5
        gen_imgs = (gen_imgs * 255).astype("uint8")
        cv2.imwrite("./images/vein_{}.png".format(epoch), gen_imgs[0])
        """
        fig, axs = plt.subplots(r, c)  # 5 rows x 5 columns
        cnt = 0  # sample counter
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1

        fig.savefig("images/mnist_%d.png" % epoch)
        plt.close()
        """
if __name__ == '__main__':
    dcgan = DCGAN()
    dcgan.train(epochs=1000, batch_size=32, save_interval=50, log_interval=10)
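
As a usage note, the script expects training images under ./dataset and writes its outputs to ./images; cv2.imwrite returns False without raising an error if that directory does not exist. Below is a minimal, hypothetical variant of the entry point (not part of the original post; the sample_*.png names are illustrative only) that creates the output directory first and then draws a few extra samples from the trained generator, reusing the imports and the DCGAN class defined above.

if __name__ == '__main__':
    os.makedirs("./images", exist_ok=True)   # save_imgs() writes into this directory
    dcgan = DCGAN()
    dcgan.train(epochs=1000, batch_size=32, save_interval=50, log_interval=10)

    # sample a few extra images from the trained generator
    noise = np.random.normal(0, 1, (5, dcgan.latent_dim))
    samples = dcgan.generator.predict(noise)                 # tanh output, in [-1, 1]
    samples = ((0.5 * samples + 0.5) * 255).astype("uint8")  # rescale to [0, 255]
    for i, img in enumerate(samples):
        cv2.imwrite("./images/sample_{}.png".format(i), img)  # illustrative file names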
