import numpy as np
import tensorflow as tf
import os 
import glob
import argparse
import random
from resnet import ResNet
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", type=str, default = "/home/imagda/sims-data/malaria",  help="path dataset of input images")
ap.add_argument("-m", "--model", type=str, default = "/orig/", help="path to trained model")
ap.add_argument("-p", "--plot", type=str, default="plot.png", help="path to output loss/accuracy plot")
args = vars(ap.parse_args([]))

1. Loading dataset

train_path = os.path.sep.join([args["dataset"], "train"])
test_path  = os.path.sep.join([args["dataset"], "test"])
val_path   = os.path.sep.join([args["dataset"], "val"])
tot_train_paths = glob.glob(os.path.sep.join([args["dataset"], "train", "*", "*"]))
tot_test_paths  = glob.glob(os.path.sep.join([args["dataset"], "test" , "*", "*"]))
tot_val_paths   = glob.glob(os.path.sep.join([args["dataset"], "val"  , "*", "*"]))
print(len(tot_train_paths), len(tot_test_paths), len(tot_val_paths))
22048 2756 2756
trainAug = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255.)
valAug   = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255.)
testAug  = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255.)
trainGen = trainAug.flow_from_directory(train_path, target_size = (64,64), class_mode = "categorical", \
                                        shuffle = True,color_mode = "rgb")
testGen  =  testAug.flow_from_directory(test_path, target_size  = (64, 64), class_mode = "categorical",\
                                        shuffle = False, color_mode = "rgb")  # keep order fixed so predictions line up with testGen.classes
valGen   =   valAug.flow_from_directory(val_path, target_size = (64,64), class_mode = "categorical",\
                                       shuffle = True, color_mode = "rgb")
Found 22046 images belonging to 2 classes.
Found 2756 images belonging to 2 classes.
Found 2756 images belonging to 2 classes.
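
flow_from_directory infers the two class labels from the subdirectory names, so a quick sanity check of the label mapping and batch shapes can be useful before training (a minimal sketch; the folder names in the comment are an assumption about the malaria dataset layout):

print(trainGen.class_indices)      # e.g. {'Parasitized': 0, 'Uninfected': 1}, depending on the folder names
images, labels = next(trainGen)
print(images.shape, labels.shape)  # (32, 64, 64, 3) (32, 2) with the default batch size of 32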

2. Create model

model = ResNet.build(64, 64, 3, 2, (2, 2, 3), (32, 64, 128, 256), reg=0.0005)
optimizer = tf.keras.optimizers.SGD(learning_rate = 0.001)
loss = tf.keras.losses.BinaryCrossentropy()
model.compile(optimizer = optimizer, loss = loss, metrics =["accuracy"])
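
Before spending any training time, a quick summary confirms that ResNet.build produced the expected 64x64x3 input and a two-class output (just a sanity check, not part of the training pipeline):

model.summary()                                   # full layer listing
print("total parameters:", model.count_params())  # rough size check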

3. Fit model

BATCH_SIZE = 32   # matches the default batch_size used by flow_from_directory above
NUM_EPOCHS = 20   # illustrative value; set as needed
history = model.fit(trainGen, steps_per_epoch = len(tot_train_paths) // BATCH_SIZE,
                    validation_data = valGen, validation_steps = len(tot_val_paths) // BATCH_SIZE,
                    epochs = NUM_EPOCHS)

4. Save model, run predictions

  • model.save("filename", save_format="h5)

  • model.save("filename.h5")

This call saves the entire model: architecture, parameters, and weights. Reloading it later is sketched below.
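
The counterpart call restores everything from that single HDF5 file (a minimal sketch; "filename.h5" is just the placeholder name used in the bullets above):

# restore architecture, weights and optimizer state from the HDF5 file
restored = tf.keras.models.load_model("filename.h5")
restored.summary()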

# make predictions on the data
print("[INFO] evaluating network...")
testGen.reset()
predIdxs = model.predict(x=testGen, steps=(len(tot_test_paths) // BATCH_SIZE) + 1)

# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)

# show a nicely formatted classification report
print(classification_report(testGen.classes, predIdxs,target_names=testGen.class_indices.keys()))

# save the network to disk
print("[INFO] serializing network to '{}'...".format(args["model"]))
model.save(args["model"], save_format="h5")

# plot the training loss and accuracy
N = NUM_EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])

References

Adrian Rosebrock, PyImageSearch, https://www.pyimagesearch.com/2018/12/10/keras-save-and-load-your-deep-learning-models/, accessed 3 January 2021.